1 // SPDX-License-Identifier: GPL-2.0
3 * Shared application/kernel submission and completion ring pairs, for
4 * supporting fast/efficient IO.
6 * A note on the read/write ordering memory barriers that are matched between
7 * the application and kernel side.
9 * After the application reads the CQ ring tail, it must use an
10 * appropriate smp_rmb() to pair with the smp_wmb() the kernel uses
11 * before writing the tail (using smp_load_acquire to read the tail will
12 * do). It also needs a smp_mb() before updating CQ head (ordering the
13 * entry load(s) with the head store), pairing with an implicit barrier
14 * through a control-dependency in io_get_cqring (smp_store_release to
15 * store head will do). Failure to do so could lead to reading invalid
18 * Likewise, the application must use an appropriate smp_wmb() before
19 * writing the SQ tail (ordering SQ entry stores with the tail store),
20 * which pairs with smp_load_acquire in io_get_sqring (smp_store_release
21 * to store the tail will do). And it needs a barrier ordering the SQ
22 * head load before writing new SQ entries (smp_load_acquire to read
25 * When using the SQ poll thread (IORING_SETUP_SQPOLL), the application
26 * needs to check the SQ flags for IORING_SQ_NEED_WAKEUP *after*
27 * updating the SQ tail; a full memory barrier smp_mb() is needed
30 * Also see the examples in the liburing library:
32 * git://git.kernel.dk/liburing
34 * io_uring also uses READ/WRITE_ONCE() for _any_ store or load that happens
35 * from data shared between the kernel and application. This is done both
36 * for ordering purposes, but also to ensure that once a value is loaded from
37 * data that the application could potentially modify, it remains stable.
39 * Copyright (C) 2018-2019 Jens Axboe
40 * Copyright (c) 2018-2019 Christoph Hellwig
42 #include <linux/kernel.h>
43 #include <linux/init.h>
44 #include <linux/errno.h>
45 #include <linux/syscalls.h>
46 #include <linux/compat.h>
47 #include <linux/refcount.h>
48 #include <linux/uio.h>
49 #include <linux/bits.h>
51 #include <linux/sched/signal.h>
53 #include <linux/file.h>
54 #include <linux/fdtable.h>
56 #include <linux/mman.h>
57 #include <linux/mmu_context.h>
58 #include <linux/percpu.h>
59 #include <linux/slab.h>
60 #include <linux/kthread.h>
61 #include <linux/blkdev.h>
62 #include <linux/bvec.h>
63 #include <linux/net.h>
65 #include <net/af_unix.h>
67 #include <linux/anon_inodes.h>
68 #include <linux/sched/mm.h>
69 #include <linux/uaccess.h>
70 #include <linux/nospec.h>
71 #include <linux/sizes.h>
72 #include <linux/hugetlb.h>
73 #include <linux/highmem.h>
74 #include <linux/namei.h>
75 #include <linux/fsnotify.h>
76 #include <linux/fadvise.h>
77 #include <linux/eventpoll.h>
78 #include <linux/fs_struct.h>
80 #define CREATE_TRACE_POINTS
81 #include <trace/events/io_uring.h>
83 #include <uapi/linux/io_uring.h>
88 #define IORING_MAX_ENTRIES 32768
89 #define IORING_MAX_CQ_ENTRIES (2 * IORING_MAX_ENTRIES)
92 * Shift of 9 is 512 entries, or exactly one page on 64-bit archs
94 #define IORING_FILE_TABLE_SHIFT 9
95 #define IORING_MAX_FILES_TABLE (1U << IORING_FILE_TABLE_SHIFT)
96 #define IORING_FILE_TABLE_MASK (IORING_MAX_FILES_TABLE - 1)
97 #define IORING_MAX_FIXED_FILES (64 * IORING_MAX_FILES_TABLE)
100 u32 head ____cacheline_aligned_in_smp
;
101 u32 tail ____cacheline_aligned_in_smp
;
105 * This data is shared with the application through the mmap at offsets
106 * IORING_OFF_SQ_RING and IORING_OFF_CQ_RING.
108 * The offsets to the member fields are published through struct
109 * io_sqring_offsets when calling io_uring_setup.
113 * Head and tail offsets into the ring; the offsets need to be
114 * masked to get valid indices.
116 * The kernel controls head of the sq ring and the tail of the cq ring,
117 * and the application controls tail of the sq ring and the head of the
120 struct io_uring sq
, cq
;
122 * Bitmasks to apply to head and tail offsets (constant, equals
125 u32 sq_ring_mask
, cq_ring_mask
;
126 /* Ring sizes (constant, power of 2) */
127 u32 sq_ring_entries
, cq_ring_entries
;
129 * Number of invalid entries dropped by the kernel due to
130 * invalid index stored in array
132 * Written by the kernel, shouldn't be modified by the
133 * application (i.e. get number of "new events" by comparing to
136 * After a new SQ head value was read by the application this
137 * counter includes all submissions that were dropped reaching
138 * the new SQ head (and possibly more).
144 * Written by the kernel, shouldn't be modified by the
147 * The application needs a full memory barrier before checking
148 * for IORING_SQ_NEED_WAKEUP after updating the sq tail.
152 * Number of completion events lost because the queue was full;
153 * this should be avoided by the application by making sure
154 * there are not more requests pending than there is space in
155 * the completion queue.
157 * Written by the kernel, shouldn't be modified by the
158 * application (i.e. get number of "new events" by comparing to
161 * As completion events come in out of order this counter is not
162 * ordered with any other data.
166 * Ring buffer of completion events.
168 * The kernel writes completion events fresh every time they are
169 * produced, so the application is allowed to modify pending
172 struct io_uring_cqe cqes
[] ____cacheline_aligned_in_smp
;
175 struct io_mapped_ubuf
{
178 struct bio_vec
*bvec
;
179 unsigned int nr_bvecs
;
182 struct fixed_file_table
{
186 struct fixed_file_data
{
187 struct fixed_file_table
*table
;
188 struct io_ring_ctx
*ctx
;
190 struct percpu_ref refs
;
191 struct llist_head put_llist
;
192 struct work_struct ref_work
;
193 struct completion done
;
198 struct percpu_ref refs
;
199 } ____cacheline_aligned_in_smp
;
203 unsigned int compat
: 1;
204 unsigned int account_mem
: 1;
205 unsigned int cq_overflow_flushed
: 1;
206 unsigned int drain_next
: 1;
207 unsigned int eventfd_async
: 1;
210 * Ring buffer of indices into array of io_uring_sqe, which is
211 * mmapped by the application using the IORING_OFF_SQES offset.
213 * This indirection could e.g. be used to assign fixed
214 * io_uring_sqe entries to operations and only submit them to
215 * the queue when needed.
217 * The kernel modifies neither the indices array nor the entries
221 unsigned cached_sq_head
;
224 unsigned sq_thread_idle
;
225 unsigned cached_sq_dropped
;
226 atomic_t cached_cq_overflow
;
227 unsigned long sq_check_overflow
;
229 struct list_head defer_list
;
230 struct list_head timeout_list
;
231 struct list_head cq_overflow_list
;
233 wait_queue_head_t inflight_wait
;
234 struct io_uring_sqe
*sq_sqes
;
235 } ____cacheline_aligned_in_smp
;
237 struct io_rings
*rings
;
241 struct task_struct
*sqo_thread
; /* if using sq thread polling */
242 struct mm_struct
*sqo_mm
;
243 wait_queue_head_t sqo_wait
;
246 * If used, fixed file set. Writers must ensure that ->refs is dead,
247 * readers must ensure that ->refs is alive as long as the file* is
248 * used. Only updated through io_uring_register(2).
250 struct fixed_file_data
*file_data
;
251 unsigned nr_user_files
;
253 struct file
*ring_file
;
255 /* if used, fixed mapped user buffers */
256 unsigned nr_user_bufs
;
257 struct io_mapped_ubuf
*user_bufs
;
259 struct user_struct
*user
;
261 const struct cred
*creds
;
263 /* 0 is for ctx quiesce/reinit/free, 1 is for sqo_thread started */
264 struct completion
*completions
;
266 /* if all else fails... */
267 struct io_kiocb
*fallback_req
;
269 #if defined(CONFIG_UNIX)
270 struct socket
*ring_sock
;
273 struct idr personality_idr
;
276 unsigned cached_cq_tail
;
279 atomic_t cq_timeouts
;
280 unsigned long cq_check_overflow
;
281 struct wait_queue_head cq_wait
;
282 struct fasync_struct
*cq_fasync
;
283 struct eventfd_ctx
*cq_ev_fd
;
284 } ____cacheline_aligned_in_smp
;
287 struct mutex uring_lock
;
288 wait_queue_head_t wait
;
289 } ____cacheline_aligned_in_smp
;
292 spinlock_t completion_lock
;
293 struct llist_head poll_llist
;
296 * ->poll_list is protected by the ctx->uring_lock for
297 * io_uring instances that don't use IORING_SETUP_SQPOLL.
298 * For SQPOLL, only the single threaded io_sq_thread() will
299 * manipulate the list, hence no extra locking is needed there.
301 struct list_head poll_list
;
302 struct hlist_head
*cancel_hash
;
303 unsigned cancel_hash_bits
;
304 bool poll_multi_file
;
306 spinlock_t inflight_lock
;
307 struct list_head inflight_list
;
308 } ____cacheline_aligned_in_smp
;
312 * First field must be the file pointer in all the
313 * iocb unions! See also 'struct kiocb' in <linux/fs.h>
315 struct io_poll_iocb
{
318 struct wait_queue_head
*head
;
324 struct wait_queue_entry wait
;
329 struct file
*put_file
;
333 struct io_timeout_data
{
334 struct io_kiocb
*req
;
335 struct hrtimer timer
;
336 struct timespec64 ts
;
337 enum hrtimer_mode mode
;
343 struct sockaddr __user
*addr
;
344 int __user
*addr_len
;
346 unsigned long nofile
;
370 /* NOTE: kiocb has the file as the first member, so don't do it here */
378 struct sockaddr __user
*addr
;
385 struct user_msghdr __user
*msg
;
398 struct filename
*filename
;
399 struct statx __user
*buffer
;
401 unsigned long nofile
;
404 struct io_files_update
{
430 struct epoll_event event
;
433 struct io_async_connect
{
434 struct sockaddr_storage address
;
437 struct io_async_msghdr
{
438 struct iovec fast_iov
[UIO_FASTIOV
];
440 struct sockaddr __user
*uaddr
;
442 struct sockaddr_storage addr
;
446 struct iovec fast_iov
[UIO_FASTIOV
];
452 struct io_async_ctx
{
454 struct io_async_rw rw
;
455 struct io_async_msghdr msg
;
456 struct io_async_connect connect
;
457 struct io_timeout_data timeout
;
462 REQ_F_FIXED_FILE_BIT
= IOSQE_FIXED_FILE_BIT
,
463 REQ_F_IO_DRAIN_BIT
= IOSQE_IO_DRAIN_BIT
,
464 REQ_F_LINK_BIT
= IOSQE_IO_LINK_BIT
,
465 REQ_F_HARDLINK_BIT
= IOSQE_IO_HARDLINK_BIT
,
466 REQ_F_FORCE_ASYNC_BIT
= IOSQE_ASYNC_BIT
,
473 REQ_F_IOPOLL_COMPLETED_BIT
,
474 REQ_F_LINK_TIMEOUT_BIT
,
478 REQ_F_TIMEOUT_NOSEQ_BIT
,
479 REQ_F_COMP_LOCKED_BIT
,
480 REQ_F_NEED_CLEANUP_BIT
,
482 REQ_F_NO_FILE_TABLE_BIT
,
487 REQ_F_FIXED_FILE
= BIT(REQ_F_FIXED_FILE_BIT
),
488 /* drain existing IO first */
489 REQ_F_IO_DRAIN
= BIT(REQ_F_IO_DRAIN_BIT
),
491 REQ_F_LINK
= BIT(REQ_F_LINK_BIT
),
492 /* doesn't sever on completion < 0 */
493 REQ_F_HARDLINK
= BIT(REQ_F_HARDLINK_BIT
),
495 REQ_F_FORCE_ASYNC
= BIT(REQ_F_FORCE_ASYNC_BIT
),
497 /* already grabbed next link */
498 REQ_F_LINK_NEXT
= BIT(REQ_F_LINK_NEXT_BIT
),
499 /* fail rest of links */
500 REQ_F_FAIL_LINK
= BIT(REQ_F_FAIL_LINK_BIT
),
501 /* on inflight list */
502 REQ_F_INFLIGHT
= BIT(REQ_F_INFLIGHT_BIT
),
503 /* read/write uses file position */
504 REQ_F_CUR_POS
= BIT(REQ_F_CUR_POS_BIT
),
505 /* must not punt to workers */
506 REQ_F_NOWAIT
= BIT(REQ_F_NOWAIT_BIT
),
507 /* polled IO has completed */
508 REQ_F_IOPOLL_COMPLETED
= BIT(REQ_F_IOPOLL_COMPLETED_BIT
),
509 /* has linked timeout */
510 REQ_F_LINK_TIMEOUT
= BIT(REQ_F_LINK_TIMEOUT_BIT
),
511 /* timeout request */
512 REQ_F_TIMEOUT
= BIT(REQ_F_TIMEOUT_BIT
),
514 REQ_F_ISREG
= BIT(REQ_F_ISREG_BIT
),
515 /* must be punted even for NONBLOCK */
516 REQ_F_MUST_PUNT
= BIT(REQ_F_MUST_PUNT_BIT
),
517 /* no timeout sequence */
518 REQ_F_TIMEOUT_NOSEQ
= BIT(REQ_F_TIMEOUT_NOSEQ_BIT
),
519 /* completion under lock */
520 REQ_F_COMP_LOCKED
= BIT(REQ_F_COMP_LOCKED_BIT
),
522 REQ_F_NEED_CLEANUP
= BIT(REQ_F_NEED_CLEANUP_BIT
),
523 /* in overflow list */
524 REQ_F_OVERFLOW
= BIT(REQ_F_OVERFLOW_BIT
),
525 /* doesn't need file table for this request */
526 REQ_F_NO_FILE_TABLE
= BIT(REQ_F_NO_FILE_TABLE_BIT
),
530 * NOTE! Each of the iocb union members has the file pointer
531 * as the first entry in their struct definition. So you can
532 * access the file pointer through any of the sub-structs,
533 * or directly as just 'ki_filp' in this struct.
539 struct io_poll_iocb poll
;
540 struct io_accept accept
;
542 struct io_cancel cancel
;
543 struct io_timeout timeout
;
544 struct io_connect connect
;
545 struct io_sr_msg sr_msg
;
547 struct io_close close
;
548 struct io_files_update files_update
;
549 struct io_fadvise fadvise
;
550 struct io_madvise madvise
;
551 struct io_epoll epoll
;
554 struct io_async_ctx
*io
;
556 * llist_node is only used for poll deferred completions
558 struct llist_node llist_node
;
560 bool needs_fixed_file
;
563 struct io_ring_ctx
*ctx
;
565 struct list_head list
;
566 struct hlist_node hash_node
;
568 struct list_head link_list
;
576 struct list_head inflight_entry
;
578 struct io_wq_work work
;
581 #define IO_PLUG_THRESHOLD 2
582 #define IO_IOPOLL_BATCH 8
584 struct io_submit_state
{
585 struct blk_plug plug
;
588 * io_kiocb alloc cache
590 void *reqs
[IO_IOPOLL_BATCH
];
591 unsigned int free_reqs
;
594 * File reference cache
598 unsigned int has_refs
;
599 unsigned int used_refs
;
600 unsigned int ios_left
;
604 /* needs req->io allocated for deferral/async */
605 unsigned async_ctx
: 1;
606 /* needs current->mm setup, does mm access */
607 unsigned needs_mm
: 1;
608 /* needs req->file assigned */
609 unsigned needs_file
: 1;
610 /* needs req->file assigned IFF fd is >= 0 */
611 unsigned fd_non_neg
: 1;
612 /* hash wq insertion if file is a regular file */
613 unsigned hash_reg_file
: 1;
614 /* unbound wq insertion if file is a non-regular file */
615 unsigned unbound_nonreg_file
: 1;
616 /* opcode is not supported by this kernel */
617 unsigned not_supported
: 1;
618 /* needs file table */
619 unsigned file_table
: 1;
621 unsigned needs_fs
: 1;
624 static const struct io_op_def io_op_defs
[] = {
625 [IORING_OP_NOP
] = {},
626 [IORING_OP_READV
] = {
630 .unbound_nonreg_file
= 1,
632 [IORING_OP_WRITEV
] = {
637 .unbound_nonreg_file
= 1,
639 [IORING_OP_FSYNC
] = {
642 [IORING_OP_READ_FIXED
] = {
644 .unbound_nonreg_file
= 1,
646 [IORING_OP_WRITE_FIXED
] = {
649 .unbound_nonreg_file
= 1,
651 [IORING_OP_POLL_ADD
] = {
653 .unbound_nonreg_file
= 1,
655 [IORING_OP_POLL_REMOVE
] = {},
656 [IORING_OP_SYNC_FILE_RANGE
] = {
659 [IORING_OP_SENDMSG
] = {
663 .unbound_nonreg_file
= 1,
666 [IORING_OP_RECVMSG
] = {
670 .unbound_nonreg_file
= 1,
673 [IORING_OP_TIMEOUT
] = {
677 [IORING_OP_TIMEOUT_REMOVE
] = {},
678 [IORING_OP_ACCEPT
] = {
681 .unbound_nonreg_file
= 1,
684 [IORING_OP_ASYNC_CANCEL
] = {},
685 [IORING_OP_LINK_TIMEOUT
] = {
689 [IORING_OP_CONNECT
] = {
693 .unbound_nonreg_file
= 1,
695 [IORING_OP_FALLOCATE
] = {
698 [IORING_OP_OPENAT
] = {
702 [IORING_OP_CLOSE
] = {
706 [IORING_OP_FILES_UPDATE
] = {
710 [IORING_OP_STATX
] = {
718 .unbound_nonreg_file
= 1,
720 [IORING_OP_WRITE
] = {
723 .unbound_nonreg_file
= 1,
725 [IORING_OP_FADVISE
] = {
728 [IORING_OP_MADVISE
] = {
734 .unbound_nonreg_file
= 1,
739 .unbound_nonreg_file
= 1,
741 [IORING_OP_OPENAT2
] = {
745 [IORING_OP_EPOLL_CTL
] = {
746 .unbound_nonreg_file
= 1,
751 static void io_wq_submit_work(struct io_wq_work
**workptr
);
752 static void io_cqring_fill_event(struct io_kiocb
*req
, long res
);
753 static void io_put_req(struct io_kiocb
*req
);
754 static void __io_double_put_req(struct io_kiocb
*req
);
755 static struct io_kiocb
*io_prep_linked_timeout(struct io_kiocb
*req
);
756 static void io_queue_linked_timeout(struct io_kiocb
*req
);
757 static int __io_sqe_files_update(struct io_ring_ctx
*ctx
,
758 struct io_uring_files_update
*ip
,
760 static int io_grab_files(struct io_kiocb
*req
);
761 static void io_ring_file_ref_flush(struct fixed_file_data
*data
);
762 static void io_cleanup_req(struct io_kiocb
*req
);
764 static struct kmem_cache
*req_cachep
;
766 static const struct file_operations io_uring_fops
;
768 struct sock
*io_uring_get_socket(struct file
*file
)
770 #if defined(CONFIG_UNIX)
771 if (file
->f_op
== &io_uring_fops
) {
772 struct io_ring_ctx
*ctx
= file
->private_data
;
774 return ctx
->ring_sock
->sk
;
779 EXPORT_SYMBOL(io_uring_get_socket
);
781 static void io_ring_ctx_ref_free(struct percpu_ref
*ref
)
783 struct io_ring_ctx
*ctx
= container_of(ref
, struct io_ring_ctx
, refs
);
785 complete(&ctx
->completions
[0]);
788 static struct io_ring_ctx
*io_ring_ctx_alloc(struct io_uring_params
*p
)
790 struct io_ring_ctx
*ctx
;
793 ctx
= kzalloc(sizeof(*ctx
), GFP_KERNEL
);
797 ctx
->fallback_req
= kmem_cache_alloc(req_cachep
, GFP_KERNEL
);
798 if (!ctx
->fallback_req
)
801 ctx
->completions
= kmalloc(2 * sizeof(struct completion
), GFP_KERNEL
);
802 if (!ctx
->completions
)
806 * Use 5 bits less than the max cq entries, that should give us around
807 * 32 entries per hash list if totally full and uniformly spread.
809 hash_bits
= ilog2(p
->cq_entries
);
813 ctx
->cancel_hash_bits
= hash_bits
;
814 ctx
->cancel_hash
= kmalloc((1U << hash_bits
) * sizeof(struct hlist_head
),
816 if (!ctx
->cancel_hash
)
818 __hash_init(ctx
->cancel_hash
, 1U << hash_bits
);
820 if (percpu_ref_init(&ctx
->refs
, io_ring_ctx_ref_free
,
821 PERCPU_REF_ALLOW_REINIT
, GFP_KERNEL
))
824 ctx
->flags
= p
->flags
;
825 init_waitqueue_head(&ctx
->cq_wait
);
826 INIT_LIST_HEAD(&ctx
->cq_overflow_list
);
827 init_completion(&ctx
->completions
[0]);
828 init_completion(&ctx
->completions
[1]);
829 idr_init(&ctx
->personality_idr
);
830 mutex_init(&ctx
->uring_lock
);
831 init_waitqueue_head(&ctx
->wait
);
832 spin_lock_init(&ctx
->completion_lock
);
833 init_llist_head(&ctx
->poll_llist
);
834 INIT_LIST_HEAD(&ctx
->poll_list
);
835 INIT_LIST_HEAD(&ctx
->defer_list
);
836 INIT_LIST_HEAD(&ctx
->timeout_list
);
837 init_waitqueue_head(&ctx
->inflight_wait
);
838 spin_lock_init(&ctx
->inflight_lock
);
839 INIT_LIST_HEAD(&ctx
->inflight_list
);
842 if (ctx
->fallback_req
)
843 kmem_cache_free(req_cachep
, ctx
->fallback_req
);
844 kfree(ctx
->completions
);
845 kfree(ctx
->cancel_hash
);
850 static inline bool __req_need_defer(struct io_kiocb
*req
)
852 struct io_ring_ctx
*ctx
= req
->ctx
;
854 return req
->sequence
!= ctx
->cached_cq_tail
+ ctx
->cached_sq_dropped
855 + atomic_read(&ctx
->cached_cq_overflow
);
858 static inline bool req_need_defer(struct io_kiocb
*req
)
860 if (unlikely(req
->flags
& REQ_F_IO_DRAIN
))
861 return __req_need_defer(req
);
866 static struct io_kiocb
*io_get_deferred_req(struct io_ring_ctx
*ctx
)
868 struct io_kiocb
*req
;
870 req
= list_first_entry_or_null(&ctx
->defer_list
, struct io_kiocb
, list
);
871 if (req
&& !req_need_defer(req
)) {
872 list_del_init(&req
->list
);
879 static struct io_kiocb
*io_get_timeout_req(struct io_ring_ctx
*ctx
)
881 struct io_kiocb
*req
;
883 req
= list_first_entry_or_null(&ctx
->timeout_list
, struct io_kiocb
, list
);
885 if (req
->flags
& REQ_F_TIMEOUT_NOSEQ
)
887 if (!__req_need_defer(req
)) {
888 list_del_init(&req
->list
);
896 static void __io_commit_cqring(struct io_ring_ctx
*ctx
)
898 struct io_rings
*rings
= ctx
->rings
;
900 /* order cqe stores with ring update */
901 smp_store_release(&rings
->cq
.tail
, ctx
->cached_cq_tail
);
903 if (wq_has_sleeper(&ctx
->cq_wait
)) {
904 wake_up_interruptible(&ctx
->cq_wait
);
905 kill_fasync(&ctx
->cq_fasync
, SIGIO
, POLL_IN
);
909 static inline void io_req_work_grab_env(struct io_kiocb
*req
,
910 const struct io_op_def
*def
)
912 if (!req
->work
.mm
&& def
->needs_mm
) {
914 req
->work
.mm
= current
->mm
;
916 if (!req
->work
.creds
)
917 req
->work
.creds
= get_current_cred();
918 if (!req
->work
.fs
&& def
->needs_fs
) {
919 spin_lock(¤t
->fs
->lock
);
920 if (!current
->fs
->in_exec
) {
921 req
->work
.fs
= current
->fs
;
922 req
->work
.fs
->users
++;
924 req
->work
.flags
|= IO_WQ_WORK_CANCEL
;
926 spin_unlock(¤t
->fs
->lock
);
928 if (!req
->work
.task_pid
)
929 req
->work
.task_pid
= task_pid_vnr(current
);
932 static inline void io_req_work_drop_env(struct io_kiocb
*req
)
935 mmdrop(req
->work
.mm
);
938 if (req
->work
.creds
) {
939 put_cred(req
->work
.creds
);
940 req
->work
.creds
= NULL
;
943 struct fs_struct
*fs
= req
->work
.fs
;
945 spin_lock(&req
->work
.fs
->lock
);
948 spin_unlock(&req
->work
.fs
->lock
);
954 static inline bool io_prep_async_work(struct io_kiocb
*req
,
955 struct io_kiocb
**link
)
957 const struct io_op_def
*def
= &io_op_defs
[req
->opcode
];
958 bool do_hashed
= false;
960 if (req
->flags
& REQ_F_ISREG
) {
961 if (def
->hash_reg_file
)
964 if (def
->unbound_nonreg_file
)
965 req
->work
.flags
|= IO_WQ_WORK_UNBOUND
;
968 io_req_work_grab_env(req
, def
);
970 *link
= io_prep_linked_timeout(req
);
974 static inline void io_queue_async_work(struct io_kiocb
*req
)
976 struct io_ring_ctx
*ctx
= req
->ctx
;
977 struct io_kiocb
*link
;
980 do_hashed
= io_prep_async_work(req
, &link
);
982 trace_io_uring_queue_async_work(ctx
, do_hashed
, req
, &req
->work
,
985 io_wq_enqueue(ctx
->io_wq
, &req
->work
);
987 io_wq_enqueue_hashed(ctx
->io_wq
, &req
->work
,
988 file_inode(req
->file
));
992 io_queue_linked_timeout(link
);
995 static void io_kill_timeout(struct io_kiocb
*req
)
999 ret
= hrtimer_try_to_cancel(&req
->io
->timeout
.timer
);
1001 atomic_inc(&req
->ctx
->cq_timeouts
);
1002 list_del_init(&req
->list
);
1003 req
->flags
|= REQ_F_COMP_LOCKED
;
1004 io_cqring_fill_event(req
, 0);
1009 static void io_kill_timeouts(struct io_ring_ctx
*ctx
)
1011 struct io_kiocb
*req
, *tmp
;
1013 spin_lock_irq(&ctx
->completion_lock
);
1014 list_for_each_entry_safe(req
, tmp
, &ctx
->timeout_list
, list
)
1015 io_kill_timeout(req
);
1016 spin_unlock_irq(&ctx
->completion_lock
);
1019 static void io_commit_cqring(struct io_ring_ctx
*ctx
)
1021 struct io_kiocb
*req
;
1023 while ((req
= io_get_timeout_req(ctx
)) != NULL
)
1024 io_kill_timeout(req
);
1026 __io_commit_cqring(ctx
);
1028 while ((req
= io_get_deferred_req(ctx
)) != NULL
)
1029 io_queue_async_work(req
);
1032 static struct io_uring_cqe
*io_get_cqring(struct io_ring_ctx
*ctx
)
1034 struct io_rings
*rings
= ctx
->rings
;
1037 tail
= ctx
->cached_cq_tail
;
1039 * writes to the cq entry need to come after reading head; the
1040 * control dependency is enough as we're using WRITE_ONCE to
1043 if (tail
- READ_ONCE(rings
->cq
.head
) == rings
->cq_ring_entries
)
1046 ctx
->cached_cq_tail
++;
1047 return &rings
->cqes
[tail
& ctx
->cq_mask
];
1050 static inline bool io_should_trigger_evfd(struct io_ring_ctx
*ctx
)
1054 if (!ctx
->eventfd_async
)
1056 return io_wq_current_is_worker() || in_interrupt();
1059 static void __io_cqring_ev_posted(struct io_ring_ctx
*ctx
, bool trigger_ev
)
1061 if (waitqueue_active(&ctx
->wait
))
1062 wake_up(&ctx
->wait
);
1063 if (waitqueue_active(&ctx
->sqo_wait
))
1064 wake_up(&ctx
->sqo_wait
);
1066 eventfd_signal(ctx
->cq_ev_fd
, 1);
1069 static void io_cqring_ev_posted(struct io_ring_ctx
*ctx
)
1071 __io_cqring_ev_posted(ctx
, io_should_trigger_evfd(ctx
));
1074 /* Returns true if there are no backlogged entries after the flush */
1075 static bool io_cqring_overflow_flush(struct io_ring_ctx
*ctx
, bool force
)
1077 struct io_rings
*rings
= ctx
->rings
;
1078 struct io_uring_cqe
*cqe
;
1079 struct io_kiocb
*req
;
1080 unsigned long flags
;
1084 if (list_empty_careful(&ctx
->cq_overflow_list
))
1086 if ((ctx
->cached_cq_tail
- READ_ONCE(rings
->cq
.head
) ==
1087 rings
->cq_ring_entries
))
1091 spin_lock_irqsave(&ctx
->completion_lock
, flags
);
1093 /* if force is set, the ring is going away. always drop after that */
1095 ctx
->cq_overflow_flushed
= 1;
1098 while (!list_empty(&ctx
->cq_overflow_list
)) {
1099 cqe
= io_get_cqring(ctx
);
1103 req
= list_first_entry(&ctx
->cq_overflow_list
, struct io_kiocb
,
1105 list_move(&req
->list
, &list
);
1106 req
->flags
&= ~REQ_F_OVERFLOW
;
1108 WRITE_ONCE(cqe
->user_data
, req
->user_data
);
1109 WRITE_ONCE(cqe
->res
, req
->result
);
1110 WRITE_ONCE(cqe
->flags
, 0);
1112 WRITE_ONCE(ctx
->rings
->cq_overflow
,
1113 atomic_inc_return(&ctx
->cached_cq_overflow
));
1117 io_commit_cqring(ctx
);
1119 clear_bit(0, &ctx
->sq_check_overflow
);
1120 clear_bit(0, &ctx
->cq_check_overflow
);
1122 spin_unlock_irqrestore(&ctx
->completion_lock
, flags
);
1123 io_cqring_ev_posted(ctx
);
1125 while (!list_empty(&list
)) {
1126 req
= list_first_entry(&list
, struct io_kiocb
, list
);
1127 list_del(&req
->list
);
1134 static void io_cqring_fill_event(struct io_kiocb
*req
, long res
)
1136 struct io_ring_ctx
*ctx
= req
->ctx
;
1137 struct io_uring_cqe
*cqe
;
1139 trace_io_uring_complete(ctx
, req
->user_data
, res
);
1142 * If we can't get a cq entry, userspace overflowed the
1143 * submission (by quite a lot). Increment the overflow count in
1146 cqe
= io_get_cqring(ctx
);
1148 WRITE_ONCE(cqe
->user_data
, req
->user_data
);
1149 WRITE_ONCE(cqe
->res
, res
);
1150 WRITE_ONCE(cqe
->flags
, 0);
1151 } else if (ctx
->cq_overflow_flushed
) {
1152 WRITE_ONCE(ctx
->rings
->cq_overflow
,
1153 atomic_inc_return(&ctx
->cached_cq_overflow
));
1155 if (list_empty(&ctx
->cq_overflow_list
)) {
1156 set_bit(0, &ctx
->sq_check_overflow
);
1157 set_bit(0, &ctx
->cq_check_overflow
);
1159 req
->flags
|= REQ_F_OVERFLOW
;
1160 refcount_inc(&req
->refs
);
1162 list_add_tail(&req
->list
, &ctx
->cq_overflow_list
);
1166 static void io_cqring_add_event(struct io_kiocb
*req
, long res
)
1168 struct io_ring_ctx
*ctx
= req
->ctx
;
1169 unsigned long flags
;
1171 spin_lock_irqsave(&ctx
->completion_lock
, flags
);
1172 io_cqring_fill_event(req
, res
);
1173 io_commit_cqring(ctx
);
1174 spin_unlock_irqrestore(&ctx
->completion_lock
, flags
);
1176 io_cqring_ev_posted(ctx
);
1179 static inline bool io_is_fallback_req(struct io_kiocb
*req
)
1181 return req
== (struct io_kiocb
*)
1182 ((unsigned long) req
->ctx
->fallback_req
& ~1UL);
1185 static struct io_kiocb
*io_get_fallback_req(struct io_ring_ctx
*ctx
)
1187 struct io_kiocb
*req
;
1189 req
= ctx
->fallback_req
;
1190 if (!test_and_set_bit_lock(0, (unsigned long *) ctx
->fallback_req
))
1196 static struct io_kiocb
*io_get_req(struct io_ring_ctx
*ctx
,
1197 struct io_submit_state
*state
)
1199 gfp_t gfp
= GFP_KERNEL
| __GFP_NOWARN
;
1200 struct io_kiocb
*req
;
1203 req
= kmem_cache_alloc(req_cachep
, gfp
);
1206 } else if (!state
->free_reqs
) {
1210 sz
= min_t(size_t, state
->ios_left
, ARRAY_SIZE(state
->reqs
));
1211 ret
= kmem_cache_alloc_bulk(req_cachep
, gfp
, sz
, state
->reqs
);
1214 * Bulk alloc is all-or-nothing. If we fail to get a batch,
1215 * retry single alloc to be on the safe side.
1217 if (unlikely(ret
<= 0)) {
1218 state
->reqs
[0] = kmem_cache_alloc(req_cachep
, gfp
);
1219 if (!state
->reqs
[0])
1223 state
->free_reqs
= ret
- 1;
1224 req
= state
->reqs
[ret
- 1];
1227 req
= state
->reqs
[state
->free_reqs
];
1235 /* one is dropped after submission, the other at completion */
1236 refcount_set(&req
->refs
, 2);
1238 INIT_IO_WORK(&req
->work
, io_wq_submit_work
);
1241 req
= io_get_fallback_req(ctx
);
1247 static void __io_req_do_free(struct io_kiocb
*req
)
1249 if (likely(!io_is_fallback_req(req
)))
1250 kmem_cache_free(req_cachep
, req
);
1252 clear_bit_unlock(0, (unsigned long *) req
->ctx
->fallback_req
);
1255 static void __io_req_aux_free(struct io_kiocb
*req
)
1257 struct io_ring_ctx
*ctx
= req
->ctx
;
1259 if (req
->flags
& REQ_F_NEED_CLEANUP
)
1260 io_cleanup_req(req
);
1264 if (req
->flags
& REQ_F_FIXED_FILE
)
1265 percpu_ref_put(&ctx
->file_data
->refs
);
1270 io_req_work_drop_env(req
);
1273 static void __io_free_req(struct io_kiocb
*req
)
1275 __io_req_aux_free(req
);
1277 if (req
->flags
& REQ_F_INFLIGHT
) {
1278 struct io_ring_ctx
*ctx
= req
->ctx
;
1279 unsigned long flags
;
1281 spin_lock_irqsave(&ctx
->inflight_lock
, flags
);
1282 list_del(&req
->inflight_entry
);
1283 if (waitqueue_active(&ctx
->inflight_wait
))
1284 wake_up(&ctx
->inflight_wait
);
1285 spin_unlock_irqrestore(&ctx
->inflight_lock
, flags
);
1288 percpu_ref_put(&req
->ctx
->refs
);
1289 __io_req_do_free(req
);
1293 void *reqs
[IO_IOPOLL_BATCH
];
1298 static void io_free_req_many(struct io_ring_ctx
*ctx
, struct req_batch
*rb
)
1300 int fixed_refs
= rb
->to_free
;
1304 if (rb
->need_iter
) {
1305 int i
, inflight
= 0;
1306 unsigned long flags
;
1309 for (i
= 0; i
< rb
->to_free
; i
++) {
1310 struct io_kiocb
*req
= rb
->reqs
[i
];
1312 if (req
->flags
& REQ_F_FIXED_FILE
) {
1316 if (req
->flags
& REQ_F_INFLIGHT
)
1318 __io_req_aux_free(req
);
1323 spin_lock_irqsave(&ctx
->inflight_lock
, flags
);
1324 for (i
= 0; i
< rb
->to_free
; i
++) {
1325 struct io_kiocb
*req
= rb
->reqs
[i
];
1327 if (req
->flags
& REQ_F_INFLIGHT
) {
1328 list_del(&req
->inflight_entry
);
1333 spin_unlock_irqrestore(&ctx
->inflight_lock
, flags
);
1335 if (waitqueue_active(&ctx
->inflight_wait
))
1336 wake_up(&ctx
->inflight_wait
);
1339 kmem_cache_free_bulk(req_cachep
, rb
->to_free
, rb
->reqs
);
1341 percpu_ref_put_many(&ctx
->file_data
->refs
, fixed_refs
);
1342 percpu_ref_put_many(&ctx
->refs
, rb
->to_free
);
1343 rb
->to_free
= rb
->need_iter
= 0;
1346 static bool io_link_cancel_timeout(struct io_kiocb
*req
)
1348 struct io_ring_ctx
*ctx
= req
->ctx
;
1351 ret
= hrtimer_try_to_cancel(&req
->io
->timeout
.timer
);
1353 io_cqring_fill_event(req
, -ECANCELED
);
1354 io_commit_cqring(ctx
);
1355 req
->flags
&= ~REQ_F_LINK
;
1363 static void io_req_link_next(struct io_kiocb
*req
, struct io_kiocb
**nxtptr
)
1365 struct io_ring_ctx
*ctx
= req
->ctx
;
1366 bool wake_ev
= false;
1368 /* Already got next link */
1369 if (req
->flags
& REQ_F_LINK_NEXT
)
1373 * The list should never be empty when we are called here. But could
1374 * potentially happen if the chain is messed up, check to be on the
1377 while (!list_empty(&req
->link_list
)) {
1378 struct io_kiocb
*nxt
= list_first_entry(&req
->link_list
,
1379 struct io_kiocb
, link_list
);
1381 if (unlikely((req
->flags
& REQ_F_LINK_TIMEOUT
) &&
1382 (nxt
->flags
& REQ_F_TIMEOUT
))) {
1383 list_del_init(&nxt
->link_list
);
1384 wake_ev
|= io_link_cancel_timeout(nxt
);
1385 req
->flags
&= ~REQ_F_LINK_TIMEOUT
;
1389 list_del_init(&req
->link_list
);
1390 if (!list_empty(&nxt
->link_list
))
1391 nxt
->flags
|= REQ_F_LINK
;
1396 req
->flags
|= REQ_F_LINK_NEXT
;
1398 io_cqring_ev_posted(ctx
);
1402 * Called if REQ_F_LINK is set, and we fail the head request
1404 static void io_fail_links(struct io_kiocb
*req
)
1406 struct io_ring_ctx
*ctx
= req
->ctx
;
1407 unsigned long flags
;
1409 spin_lock_irqsave(&ctx
->completion_lock
, flags
);
1411 while (!list_empty(&req
->link_list
)) {
1412 struct io_kiocb
*link
= list_first_entry(&req
->link_list
,
1413 struct io_kiocb
, link_list
);
1415 list_del_init(&link
->link_list
);
1416 trace_io_uring_fail_link(req
, link
);
1418 if ((req
->flags
& REQ_F_LINK_TIMEOUT
) &&
1419 link
->opcode
== IORING_OP_LINK_TIMEOUT
) {
1420 io_link_cancel_timeout(link
);
1422 io_cqring_fill_event(link
, -ECANCELED
);
1423 __io_double_put_req(link
);
1425 req
->flags
&= ~REQ_F_LINK_TIMEOUT
;
1428 io_commit_cqring(ctx
);
1429 spin_unlock_irqrestore(&ctx
->completion_lock
, flags
);
1430 io_cqring_ev_posted(ctx
);
1433 static void io_req_find_next(struct io_kiocb
*req
, struct io_kiocb
**nxt
)
1435 if (likely(!(req
->flags
& REQ_F_LINK
)))
1439 * If LINK is set, we have dependent requests in this chain. If we
1440 * didn't fail this request, queue the first one up, moving any other
1441 * dependencies to the next request. In case of failure, fail the rest
1444 if (req
->flags
& REQ_F_FAIL_LINK
) {
1446 } else if ((req
->flags
& (REQ_F_LINK_TIMEOUT
| REQ_F_COMP_LOCKED
)) ==
1447 REQ_F_LINK_TIMEOUT
) {
1448 struct io_ring_ctx
*ctx
= req
->ctx
;
1449 unsigned long flags
;
1452 * If this is a timeout link, we could be racing with the
1453 * timeout timer. Grab the completion lock for this case to
1454 * protect against that.
1456 spin_lock_irqsave(&ctx
->completion_lock
, flags
);
1457 io_req_link_next(req
, nxt
);
1458 spin_unlock_irqrestore(&ctx
->completion_lock
, flags
);
1460 io_req_link_next(req
, nxt
);
1464 static void io_free_req(struct io_kiocb
*req
)
1466 struct io_kiocb
*nxt
= NULL
;
1468 io_req_find_next(req
, &nxt
);
1472 io_queue_async_work(nxt
);
1476 * Drop reference to request, return next in chain (if there is one) if this
1477 * was the last reference to this request.
1479 __attribute__((nonnull
))
1480 static void io_put_req_find_next(struct io_kiocb
*req
, struct io_kiocb
**nxtptr
)
1482 if (refcount_dec_and_test(&req
->refs
)) {
1483 io_req_find_next(req
, nxtptr
);
1488 static void io_put_req(struct io_kiocb
*req
)
1490 if (refcount_dec_and_test(&req
->refs
))
1495 * Must only be used if we don't need to care about links, usually from
1496 * within the completion handling itself.
1498 static void __io_double_put_req(struct io_kiocb
*req
)
1500 /* drop both submit and complete references */
1501 if (refcount_sub_and_test(2, &req
->refs
))
1505 static void io_double_put_req(struct io_kiocb
*req
)
1507 /* drop both submit and complete references */
1508 if (refcount_sub_and_test(2, &req
->refs
))
1512 static unsigned io_cqring_events(struct io_ring_ctx
*ctx
, bool noflush
)
1514 struct io_rings
*rings
= ctx
->rings
;
1516 if (test_bit(0, &ctx
->cq_check_overflow
)) {
1518 * noflush == true is from the waitqueue handler, just ensure
1519 * we wake up the task, and the next invocation will flush the
1520 * entries. We cannot safely to it from here.
1522 if (noflush
&& !list_empty(&ctx
->cq_overflow_list
))
1525 io_cqring_overflow_flush(ctx
, false);
1528 /* See comment at the top of this file */
1530 return ctx
->cached_cq_tail
- READ_ONCE(rings
->cq
.head
);
1533 static inline unsigned int io_sqring_entries(struct io_ring_ctx
*ctx
)
1535 struct io_rings
*rings
= ctx
->rings
;
1537 /* make sure SQ entry isn't read before tail */
1538 return smp_load_acquire(&rings
->sq
.tail
) - ctx
->cached_sq_head
;
1541 static inline bool io_req_multi_free(struct req_batch
*rb
, struct io_kiocb
*req
)
1543 if ((req
->flags
& REQ_F_LINK
) || io_is_fallback_req(req
))
1546 if (!(req
->flags
& REQ_F_FIXED_FILE
) || req
->io
)
1549 rb
->reqs
[rb
->to_free
++] = req
;
1550 if (unlikely(rb
->to_free
== ARRAY_SIZE(rb
->reqs
)))
1551 io_free_req_many(req
->ctx
, rb
);
1556 * Find and free completed poll iocbs
1558 static void io_iopoll_complete(struct io_ring_ctx
*ctx
, unsigned int *nr_events
,
1559 struct list_head
*done
)
1561 struct req_batch rb
;
1562 struct io_kiocb
*req
;
1564 rb
.to_free
= rb
.need_iter
= 0;
1565 while (!list_empty(done
)) {
1566 req
= list_first_entry(done
, struct io_kiocb
, list
);
1567 list_del(&req
->list
);
1569 io_cqring_fill_event(req
, req
->result
);
1572 if (refcount_dec_and_test(&req
->refs
) &&
1573 !io_req_multi_free(&rb
, req
))
1577 io_commit_cqring(ctx
);
1578 io_free_req_many(ctx
, &rb
);
1581 static int io_do_iopoll(struct io_ring_ctx
*ctx
, unsigned int *nr_events
,
1584 struct io_kiocb
*req
, *tmp
;
1590 * Only spin for completions if we don't have multiple devices hanging
1591 * off our complete list, and we're under the requested amount.
1593 spin
= !ctx
->poll_multi_file
&& *nr_events
< min
;
1596 list_for_each_entry_safe(req
, tmp
, &ctx
->poll_list
, list
) {
1597 struct kiocb
*kiocb
= &req
->rw
.kiocb
;
1600 * Move completed entries to our local list. If we find a
1601 * request that requires polling, break out and complete
1602 * the done list first, if we have entries there.
1604 if (req
->flags
& REQ_F_IOPOLL_COMPLETED
) {
1605 list_move_tail(&req
->list
, &done
);
1608 if (!list_empty(&done
))
1611 ret
= kiocb
->ki_filp
->f_op
->iopoll(kiocb
, spin
);
1620 if (!list_empty(&done
))
1621 io_iopoll_complete(ctx
, nr_events
, &done
);
1627 * Poll for a minimum of 'min' events. Note that if min == 0 we consider that a
1628 * non-spinning poll check - we'll still enter the driver poll loop, but only
1629 * as a non-spinning completion check.
1631 static int io_iopoll_getevents(struct io_ring_ctx
*ctx
, unsigned int *nr_events
,
1634 while (!list_empty(&ctx
->poll_list
) && !need_resched()) {
1637 ret
= io_do_iopoll(ctx
, nr_events
, min
);
1640 if (!min
|| *nr_events
>= min
)
1648 * We can't just wait for polled events to come to us, we have to actively
1649 * find and complete them.
1651 static void io_iopoll_reap_events(struct io_ring_ctx
*ctx
)
1653 if (!(ctx
->flags
& IORING_SETUP_IOPOLL
))
1656 mutex_lock(&ctx
->uring_lock
);
1657 while (!list_empty(&ctx
->poll_list
)) {
1658 unsigned int nr_events
= 0;
1660 io_iopoll_getevents(ctx
, &nr_events
, 1);
1663 * Ensure we allow local-to-the-cpu processing to take place,
1664 * in this case we need to ensure that we reap all events.
1668 mutex_unlock(&ctx
->uring_lock
);
1671 static int io_iopoll_check(struct io_ring_ctx
*ctx
, unsigned *nr_events
,
1674 int iters
= 0, ret
= 0;
1677 * We disallow the app entering submit/complete with polling, but we
1678 * still need to lock the ring to prevent racing with polled issue
1679 * that got punted to a workqueue.
1681 mutex_lock(&ctx
->uring_lock
);
1686 * Don't enter poll loop if we already have events pending.
1687 * If we do, we can potentially be spinning for commands that
1688 * already triggered a CQE (eg in error).
1690 if (io_cqring_events(ctx
, false))
1694 * If a submit got punted to a workqueue, we can have the
1695 * application entering polling for a command before it gets
1696 * issued. That app will hold the uring_lock for the duration
1697 * of the poll right here, so we need to take a breather every
1698 * now and then to ensure that the issue has a chance to add
1699 * the poll to the issued list. Otherwise we can spin here
1700 * forever, while the workqueue is stuck trying to acquire the
1703 if (!(++iters
& 7)) {
1704 mutex_unlock(&ctx
->uring_lock
);
1705 mutex_lock(&ctx
->uring_lock
);
1708 if (*nr_events
< min
)
1709 tmin
= min
- *nr_events
;
1711 ret
= io_iopoll_getevents(ctx
, nr_events
, tmin
);
1715 } while (min
&& !*nr_events
&& !need_resched());
1717 mutex_unlock(&ctx
->uring_lock
);
1721 static void kiocb_end_write(struct io_kiocb
*req
)
1724 * Tell lockdep we inherited freeze protection from submission
1727 if (req
->flags
& REQ_F_ISREG
) {
1728 struct inode
*inode
= file_inode(req
->file
);
1730 __sb_writers_acquired(inode
->i_sb
, SB_FREEZE_WRITE
);
1732 file_end_write(req
->file
);
1735 static inline void req_set_fail_links(struct io_kiocb
*req
)
1737 if ((req
->flags
& (REQ_F_LINK
| REQ_F_HARDLINK
)) == REQ_F_LINK
)
1738 req
->flags
|= REQ_F_FAIL_LINK
;
1741 static void io_complete_rw_common(struct kiocb
*kiocb
, long res
)
1743 struct io_kiocb
*req
= container_of(kiocb
, struct io_kiocb
, rw
.kiocb
);
1745 if (kiocb
->ki_flags
& IOCB_WRITE
)
1746 kiocb_end_write(req
);
1748 if (res
!= req
->result
)
1749 req_set_fail_links(req
);
1750 io_cqring_add_event(req
, res
);
1753 static void io_complete_rw(struct kiocb
*kiocb
, long res
, long res2
)
1755 struct io_kiocb
*req
= container_of(kiocb
, struct io_kiocb
, rw
.kiocb
);
1757 io_complete_rw_common(kiocb
, res
);
1761 static struct io_kiocb
*__io_complete_rw(struct kiocb
*kiocb
, long res
)
1763 struct io_kiocb
*req
= container_of(kiocb
, struct io_kiocb
, rw
.kiocb
);
1764 struct io_kiocb
*nxt
= NULL
;
1766 io_complete_rw_common(kiocb
, res
);
1767 io_put_req_find_next(req
, &nxt
);
1772 static void io_complete_rw_iopoll(struct kiocb
*kiocb
, long res
, long res2
)
1774 struct io_kiocb
*req
= container_of(kiocb
, struct io_kiocb
, rw
.kiocb
);
1776 if (kiocb
->ki_flags
& IOCB_WRITE
)
1777 kiocb_end_write(req
);
1779 if (res
!= req
->result
)
1780 req_set_fail_links(req
);
1783 req
->flags
|= REQ_F_IOPOLL_COMPLETED
;
1787 * After the iocb has been issued, it's safe to be found on the poll list.
1788 * Adding the kiocb to the list AFTER submission ensures that we don't
1789 * find it from a io_iopoll_getevents() thread before the issuer is done
1790 * accessing the kiocb cookie.
1792 static void io_iopoll_req_issued(struct io_kiocb
*req
)
1794 struct io_ring_ctx
*ctx
= req
->ctx
;
1797 * Track whether we have multiple files in our lists. This will impact
1798 * how we do polling eventually, not spinning if we're on potentially
1799 * different devices.
1801 if (list_empty(&ctx
->poll_list
)) {
1802 ctx
->poll_multi_file
= false;
1803 } else if (!ctx
->poll_multi_file
) {
1804 struct io_kiocb
*list_req
;
1806 list_req
= list_first_entry(&ctx
->poll_list
, struct io_kiocb
,
1808 if (list_req
->file
!= req
->file
)
1809 ctx
->poll_multi_file
= true;
1813 * For fast devices, IO may have already completed. If it has, add
1814 * it to the front so we find it first.
1816 if (req
->flags
& REQ_F_IOPOLL_COMPLETED
)
1817 list_add(&req
->list
, &ctx
->poll_list
);
1819 list_add_tail(&req
->list
, &ctx
->poll_list
);
1821 if ((ctx
->flags
& IORING_SETUP_SQPOLL
) &&
1822 wq_has_sleeper(&ctx
->sqo_wait
))
1823 wake_up(&ctx
->sqo_wait
);
1826 static void io_file_put(struct io_submit_state
*state
)
1829 int diff
= state
->has_refs
- state
->used_refs
;
1832 fput_many(state
->file
, diff
);
1838 * Get as many references to a file as we have IOs left in this submission,
1839 * assuming most submissions are for one file, or at least that each file
1840 * has more than one submission.
1842 static struct file
*io_file_get(struct io_submit_state
*state
, int fd
)
1848 if (state
->fd
== fd
) {
1855 state
->file
= fget_many(fd
, state
->ios_left
);
1860 state
->has_refs
= state
->ios_left
;
1861 state
->used_refs
= 1;
1867 * If we tracked the file through the SCM inflight mechanism, we could support
1868 * any file. For now, just ensure that anything potentially problematic is done
1871 static bool io_file_supports_async(struct file
*file
)
1873 umode_t mode
= file_inode(file
)->i_mode
;
1875 if (S_ISBLK(mode
) || S_ISCHR(mode
) || S_ISSOCK(mode
))
1877 if (S_ISREG(mode
) && file
->f_op
!= &io_uring_fops
)
1883 static int io_prep_rw(struct io_kiocb
*req
, const struct io_uring_sqe
*sqe
,
1884 bool force_nonblock
)
1886 struct io_ring_ctx
*ctx
= req
->ctx
;
1887 struct kiocb
*kiocb
= &req
->rw
.kiocb
;
1891 if (S_ISREG(file_inode(req
->file
)->i_mode
))
1892 req
->flags
|= REQ_F_ISREG
;
1894 kiocb
->ki_pos
= READ_ONCE(sqe
->off
);
1895 if (kiocb
->ki_pos
== -1 && !(req
->file
->f_mode
& FMODE_STREAM
)) {
1896 req
->flags
|= REQ_F_CUR_POS
;
1897 kiocb
->ki_pos
= req
->file
->f_pos
;
1899 kiocb
->ki_hint
= ki_hint_validate(file_write_hint(kiocb
->ki_filp
));
1900 kiocb
->ki_flags
= iocb_flags(kiocb
->ki_filp
);
1901 ret
= kiocb_set_rw_flags(kiocb
, READ_ONCE(sqe
->rw_flags
));
1905 ioprio
= READ_ONCE(sqe
->ioprio
);
1907 ret
= ioprio_check_cap(ioprio
);
1911 kiocb
->ki_ioprio
= ioprio
;
1913 kiocb
->ki_ioprio
= get_current_ioprio();
1915 /* don't allow async punt if RWF_NOWAIT was requested */
1916 if ((kiocb
->ki_flags
& IOCB_NOWAIT
) ||
1917 (req
->file
->f_flags
& O_NONBLOCK
))
1918 req
->flags
|= REQ_F_NOWAIT
;
1921 kiocb
->ki_flags
|= IOCB_NOWAIT
;
1923 if (ctx
->flags
& IORING_SETUP_IOPOLL
) {
1924 if (!(kiocb
->ki_flags
& IOCB_DIRECT
) ||
1925 !kiocb
->ki_filp
->f_op
->iopoll
)
1928 kiocb
->ki_flags
|= IOCB_HIPRI
;
1929 kiocb
->ki_complete
= io_complete_rw_iopoll
;
1932 if (kiocb
->ki_flags
& IOCB_HIPRI
)
1934 kiocb
->ki_complete
= io_complete_rw
;
1937 req
->rw
.addr
= READ_ONCE(sqe
->addr
);
1938 req
->rw
.len
= READ_ONCE(sqe
->len
);
1939 /* we own ->private, reuse it for the buffer index */
1940 req
->rw
.kiocb
.private = (void *) (unsigned long)
1941 READ_ONCE(sqe
->buf_index
);
1945 static inline void io_rw_done(struct kiocb
*kiocb
, ssize_t ret
)
1951 case -ERESTARTNOINTR
:
1952 case -ERESTARTNOHAND
:
1953 case -ERESTART_RESTARTBLOCK
:
1955 * We can't just restart the syscall, since previously
1956 * submitted sqes may already be in progress. Just fail this
1962 kiocb
->ki_complete(kiocb
, ret
, 0);
1966 static void kiocb_done(struct kiocb
*kiocb
, ssize_t ret
, struct io_kiocb
**nxt
,
1969 struct io_kiocb
*req
= container_of(kiocb
, struct io_kiocb
, rw
.kiocb
);
1971 if (req
->flags
& REQ_F_CUR_POS
)
1972 req
->file
->f_pos
= kiocb
->ki_pos
;
1973 if (in_async
&& ret
>= 0 && kiocb
->ki_complete
== io_complete_rw
)
1974 *nxt
= __io_complete_rw(kiocb
, ret
);
1976 io_rw_done(kiocb
, ret
);
1979 static ssize_t
io_import_fixed(struct io_kiocb
*req
, int rw
,
1980 struct iov_iter
*iter
)
1982 struct io_ring_ctx
*ctx
= req
->ctx
;
1983 size_t len
= req
->rw
.len
;
1984 struct io_mapped_ubuf
*imu
;
1985 unsigned index
, buf_index
;
1989 /* attempt to use fixed buffers without having provided iovecs */
1990 if (unlikely(!ctx
->user_bufs
))
1993 buf_index
= (unsigned long) req
->rw
.kiocb
.private;
1994 if (unlikely(buf_index
>= ctx
->nr_user_bufs
))
1997 index
= array_index_nospec(buf_index
, ctx
->nr_user_bufs
);
1998 imu
= &ctx
->user_bufs
[index
];
1999 buf_addr
= req
->rw
.addr
;
2002 if (buf_addr
+ len
< buf_addr
)
2004 /* not inside the mapped region */
2005 if (buf_addr
< imu
->ubuf
|| buf_addr
+ len
> imu
->ubuf
+ imu
->len
)
2009 * May not be a start of buffer, set size appropriately
2010 * and advance us to the beginning.
2012 offset
= buf_addr
- imu
->ubuf
;
2013 iov_iter_bvec(iter
, rw
, imu
->bvec
, imu
->nr_bvecs
, offset
+ len
);
2017 * Don't use iov_iter_advance() here, as it's really slow for
2018 * using the latter parts of a big fixed buffer - it iterates
2019 * over each segment manually. We can cheat a bit here, because
2022 * 1) it's a BVEC iter, we set it up
2023 * 2) all bvecs are PAGE_SIZE in size, except potentially the
2024 * first and last bvec
2026 * So just find our index, and adjust the iterator afterwards.
2027 * If the offset is within the first bvec (or the whole first
2028 * bvec, just use iov_iter_advance(). This makes it easier
2029 * since we can just skip the first segment, which may not
2030 * be PAGE_SIZE aligned.
2032 const struct bio_vec
*bvec
= imu
->bvec
;
2034 if (offset
<= bvec
->bv_len
) {
2035 iov_iter_advance(iter
, offset
);
2037 unsigned long seg_skip
;
2039 /* skip first vec */
2040 offset
-= bvec
->bv_len
;
2041 seg_skip
= 1 + (offset
>> PAGE_SHIFT
);
2043 iter
->bvec
= bvec
+ seg_skip
;
2044 iter
->nr_segs
-= seg_skip
;
2045 iter
->count
-= bvec
->bv_len
+ offset
;
2046 iter
->iov_offset
= offset
& ~PAGE_MASK
;
2053 static ssize_t
io_import_iovec(int rw
, struct io_kiocb
*req
,
2054 struct iovec
**iovec
, struct iov_iter
*iter
)
2056 void __user
*buf
= u64_to_user_ptr(req
->rw
.addr
);
2057 size_t sqe_len
= req
->rw
.len
;
2060 opcode
= req
->opcode
;
2061 if (opcode
== IORING_OP_READ_FIXED
|| opcode
== IORING_OP_WRITE_FIXED
) {
2063 return io_import_fixed(req
, rw
, iter
);
2066 /* buffer index only valid with fixed read/write */
2067 if (req
->rw
.kiocb
.private)
2070 if (opcode
== IORING_OP_READ
|| opcode
== IORING_OP_WRITE
) {
2072 ret
= import_single_range(rw
, buf
, sqe_len
, *iovec
, iter
);
2074 return ret
< 0 ? ret
: sqe_len
;
2078 struct io_async_rw
*iorw
= &req
->io
->rw
;
2081 iov_iter_init(iter
, rw
, *iovec
, iorw
->nr_segs
, iorw
->size
);
2082 if (iorw
->iov
== iorw
->fast_iov
)
2087 #ifdef CONFIG_COMPAT
2088 if (req
->ctx
->compat
)
2089 return compat_import_iovec(rw
, buf
, sqe_len
, UIO_FASTIOV
,
2093 return import_iovec(rw
, buf
, sqe_len
, UIO_FASTIOV
, iovec
, iter
);
2097 * For files that don't have ->read_iter() and ->write_iter(), handle them
2098 * by looping over ->read() or ->write() manually.
2100 static ssize_t
loop_rw_iter(int rw
, struct file
*file
, struct kiocb
*kiocb
,
2101 struct iov_iter
*iter
)
2106 * Don't support polled IO through this interface, and we can't
2107 * support non-blocking either. For the latter, this just causes
2108 * the kiocb to be handled from an async context.
2110 if (kiocb
->ki_flags
& IOCB_HIPRI
)
2112 if (kiocb
->ki_flags
& IOCB_NOWAIT
)
2115 while (iov_iter_count(iter
)) {
2119 if (!iov_iter_is_bvec(iter
)) {
2120 iovec
= iov_iter_iovec(iter
);
2122 /* fixed buffers import bvec */
2123 iovec
.iov_base
= kmap(iter
->bvec
->bv_page
)
2125 iovec
.iov_len
= min(iter
->count
,
2126 iter
->bvec
->bv_len
- iter
->iov_offset
);
2130 nr
= file
->f_op
->read(file
, iovec
.iov_base
,
2131 iovec
.iov_len
, &kiocb
->ki_pos
);
2133 nr
= file
->f_op
->write(file
, iovec
.iov_base
,
2134 iovec
.iov_len
, &kiocb
->ki_pos
);
2137 if (iov_iter_is_bvec(iter
))
2138 kunmap(iter
->bvec
->bv_page
);
2146 if (nr
!= iovec
.iov_len
)
2148 iov_iter_advance(iter
, nr
);
2154 static void io_req_map_rw(struct io_kiocb
*req
, ssize_t io_size
,
2155 struct iovec
*iovec
, struct iovec
*fast_iov
,
2156 struct iov_iter
*iter
)
2158 req
->io
->rw
.nr_segs
= iter
->nr_segs
;
2159 req
->io
->rw
.size
= io_size
;
2160 req
->io
->rw
.iov
= iovec
;
2161 if (!req
->io
->rw
.iov
) {
2162 req
->io
->rw
.iov
= req
->io
->rw
.fast_iov
;
2163 memcpy(req
->io
->rw
.iov
, fast_iov
,
2164 sizeof(struct iovec
) * iter
->nr_segs
);
2166 req
->flags
|= REQ_F_NEED_CLEANUP
;
2170 static int io_alloc_async_ctx(struct io_kiocb
*req
)
2172 if (!io_op_defs
[req
->opcode
].async_ctx
)
2174 req
->io
= kmalloc(sizeof(*req
->io
), GFP_KERNEL
);
2175 return req
->io
== NULL
;
2178 static int io_setup_async_rw(struct io_kiocb
*req
, ssize_t io_size
,
2179 struct iovec
*iovec
, struct iovec
*fast_iov
,
2180 struct iov_iter
*iter
)
2182 if (!io_op_defs
[req
->opcode
].async_ctx
)
2185 if (io_alloc_async_ctx(req
))
2188 io_req_map_rw(req
, io_size
, iovec
, fast_iov
, iter
);
2193 static int io_read_prep(struct io_kiocb
*req
, const struct io_uring_sqe
*sqe
,
2194 bool force_nonblock
)
2196 struct io_async_ctx
*io
;
2197 struct iov_iter iter
;
2200 ret
= io_prep_rw(req
, sqe
, force_nonblock
);
2204 if (unlikely(!(req
->file
->f_mode
& FMODE_READ
)))
2207 /* either don't need iovec imported or already have it */
2208 if (!req
->io
|| req
->flags
& REQ_F_NEED_CLEANUP
)
2212 io
->rw
.iov
= io
->rw
.fast_iov
;
2214 ret
= io_import_iovec(READ
, req
, &io
->rw
.iov
, &iter
);
2219 io_req_map_rw(req
, ret
, io
->rw
.iov
, io
->rw
.fast_iov
, &iter
);
2223 static int io_read(struct io_kiocb
*req
, struct io_kiocb
**nxt
,
2224 bool force_nonblock
)
2226 struct iovec inline_vecs
[UIO_FASTIOV
], *iovec
= inline_vecs
;
2227 struct kiocb
*kiocb
= &req
->rw
.kiocb
;
2228 struct iov_iter iter
;
2230 ssize_t io_size
, ret
;
2232 ret
= io_import_iovec(READ
, req
, &iovec
, &iter
);
2236 /* Ensure we clear previously set non-block flag */
2237 if (!force_nonblock
)
2238 req
->rw
.kiocb
.ki_flags
&= ~IOCB_NOWAIT
;
2242 if (req
->flags
& REQ_F_LINK
)
2243 req
->result
= io_size
;
2246 * If the file doesn't support async, mark it as REQ_F_MUST_PUNT so
2247 * we know to async punt it even if it was opened O_NONBLOCK
2249 if (force_nonblock
&& !io_file_supports_async(req
->file
)) {
2250 req
->flags
|= REQ_F_MUST_PUNT
;
2254 iov_count
= iov_iter_count(&iter
);
2255 ret
= rw_verify_area(READ
, req
->file
, &kiocb
->ki_pos
, iov_count
);
2259 if (req
->file
->f_op
->read_iter
)
2260 ret2
= call_read_iter(req
->file
, kiocb
, &iter
);
2262 ret2
= loop_rw_iter(READ
, req
->file
, kiocb
, &iter
);
2264 /* Catch -EAGAIN return for forced non-blocking submission */
2265 if (!force_nonblock
|| ret2
!= -EAGAIN
) {
2266 kiocb_done(kiocb
, ret2
, nxt
, req
->in_async
);
2269 ret
= io_setup_async_rw(req
, io_size
, iovec
,
2270 inline_vecs
, &iter
);
2278 req
->flags
&= ~REQ_F_NEED_CLEANUP
;
2282 static int io_write_prep(struct io_kiocb
*req
, const struct io_uring_sqe
*sqe
,
2283 bool force_nonblock
)
2285 struct io_async_ctx
*io
;
2286 struct iov_iter iter
;
2289 ret
= io_prep_rw(req
, sqe
, force_nonblock
);
2293 if (unlikely(!(req
->file
->f_mode
& FMODE_WRITE
)))
2296 req
->fsize
= rlimit(RLIMIT_FSIZE
);
2298 /* either don't need iovec imported or already have it */
2299 if (!req
->io
|| req
->flags
& REQ_F_NEED_CLEANUP
)
2303 io
->rw
.iov
= io
->rw
.fast_iov
;
2305 ret
= io_import_iovec(WRITE
, req
, &io
->rw
.iov
, &iter
);
2310 io_req_map_rw(req
, ret
, io
->rw
.iov
, io
->rw
.fast_iov
, &iter
);
2314 static int io_write(struct io_kiocb
*req
, struct io_kiocb
**nxt
,
2315 bool force_nonblock
)
2317 struct iovec inline_vecs
[UIO_FASTIOV
], *iovec
= inline_vecs
;
2318 struct kiocb
*kiocb
= &req
->rw
.kiocb
;
2319 struct iov_iter iter
;
2321 ssize_t ret
, io_size
;
2323 ret
= io_import_iovec(WRITE
, req
, &iovec
, &iter
);
2327 /* Ensure we clear previously set non-block flag */
2328 if (!force_nonblock
)
2329 req
->rw
.kiocb
.ki_flags
&= ~IOCB_NOWAIT
;
2333 if (req
->flags
& REQ_F_LINK
)
2334 req
->result
= io_size
;
2337 * If the file doesn't support async, mark it as REQ_F_MUST_PUNT so
2338 * we know to async punt it even if it was opened O_NONBLOCK
2340 if (force_nonblock
&& !io_file_supports_async(req
->file
)) {
2341 req
->flags
|= REQ_F_MUST_PUNT
;
2345 /* file path doesn't support NOWAIT for non-direct_IO */
2346 if (force_nonblock
&& !(kiocb
->ki_flags
& IOCB_DIRECT
) &&
2347 (req
->flags
& REQ_F_ISREG
))
2350 iov_count
= iov_iter_count(&iter
);
2351 ret
= rw_verify_area(WRITE
, req
->file
, &kiocb
->ki_pos
, iov_count
);
2356 * Open-code file_start_write here to grab freeze protection,
2357 * which will be released by another thread in
2358 * io_complete_rw(). Fool lockdep by telling it the lock got
2359 * released so that it doesn't complain about the held lock when
2360 * we return to userspace.
2362 if (req
->flags
& REQ_F_ISREG
) {
2363 __sb_start_write(file_inode(req
->file
)->i_sb
,
2364 SB_FREEZE_WRITE
, true);
2365 __sb_writers_release(file_inode(req
->file
)->i_sb
,
2368 kiocb
->ki_flags
|= IOCB_WRITE
;
2370 if (!force_nonblock
)
2371 current
->signal
->rlim
[RLIMIT_FSIZE
].rlim_cur
= req
->fsize
;
2373 if (req
->file
->f_op
->write_iter
)
2374 ret2
= call_write_iter(req
->file
, kiocb
, &iter
);
2376 ret2
= loop_rw_iter(WRITE
, req
->file
, kiocb
, &iter
);
2378 if (!force_nonblock
)
2379 current
->signal
->rlim
[RLIMIT_FSIZE
].rlim_cur
= RLIM_INFINITY
;
2382 * Raw bdev writes will -EOPNOTSUPP for IOCB_NOWAIT. Just
2383 * retry them without IOCB_NOWAIT.
2385 if (ret2
== -EOPNOTSUPP
&& (kiocb
->ki_flags
& IOCB_NOWAIT
))
2387 if (!force_nonblock
|| ret2
!= -EAGAIN
) {
2388 kiocb_done(kiocb
, ret2
, nxt
, req
->in_async
);
2391 ret
= io_setup_async_rw(req
, io_size
, iovec
,
2392 inline_vecs
, &iter
);
2399 req
->flags
&= ~REQ_F_NEED_CLEANUP
;
2405 * IORING_OP_NOP just posts a completion event, nothing else.
2407 static int io_nop(struct io_kiocb
*req
)
2409 struct io_ring_ctx
*ctx
= req
->ctx
;
2411 if (unlikely(ctx
->flags
& IORING_SETUP_IOPOLL
))
2414 io_cqring_add_event(req
, 0);
2419 static int io_prep_fsync(struct io_kiocb
*req
, const struct io_uring_sqe
*sqe
)
2421 struct io_ring_ctx
*ctx
= req
->ctx
;
2426 if (unlikely(ctx
->flags
& IORING_SETUP_IOPOLL
))
2428 if (unlikely(sqe
->addr
|| sqe
->ioprio
|| sqe
->buf_index
))
2431 req
->sync
.flags
= READ_ONCE(sqe
->fsync_flags
);
2432 if (unlikely(req
->sync
.flags
& ~IORING_FSYNC_DATASYNC
))
2435 req
->sync
.off
= READ_ONCE(sqe
->off
);
2436 req
->sync
.len
= READ_ONCE(sqe
->len
);
2440 static bool io_req_cancelled(struct io_kiocb
*req
)
2442 if (req
->work
.flags
& IO_WQ_WORK_CANCEL
) {
2443 req_set_fail_links(req
);
2444 io_cqring_add_event(req
, -ECANCELED
);
2452 static void io_link_work_cb(struct io_wq_work
**workptr
)
2454 struct io_wq_work
*work
= *workptr
;
2455 struct io_kiocb
*link
= work
->data
;
2457 io_queue_linked_timeout(link
);
2458 work
->func
= io_wq_submit_work
;
2461 static void io_wq_assign_next(struct io_wq_work
**workptr
, struct io_kiocb
*nxt
)
2463 struct io_kiocb
*link
;
2465 io_prep_async_work(nxt
, &link
);
2466 *workptr
= &nxt
->work
;
2468 nxt
->work
.flags
|= IO_WQ_WORK_CB
;
2469 nxt
->work
.func
= io_link_work_cb
;
2470 nxt
->work
.data
= link
;
2474 static void io_fsync_finish(struct io_wq_work
**workptr
)
2476 struct io_kiocb
*req
= container_of(*workptr
, struct io_kiocb
, work
);
2477 loff_t end
= req
->sync
.off
+ req
->sync
.len
;
2478 struct io_kiocb
*nxt
= NULL
;
2481 if (io_req_cancelled(req
))
2484 ret
= vfs_fsync_range(req
->file
, req
->sync
.off
,
2485 end
> 0 ? end
: LLONG_MAX
,
2486 req
->sync
.flags
& IORING_FSYNC_DATASYNC
);
2488 req_set_fail_links(req
);
2489 io_cqring_add_event(req
, ret
);
2490 io_put_req_find_next(req
, &nxt
);
2492 io_wq_assign_next(workptr
, nxt
);
2495 static int io_fsync(struct io_kiocb
*req
, struct io_kiocb
**nxt
,
2496 bool force_nonblock
)
2498 struct io_wq_work
*work
, *old_work
;
2500 /* fsync always requires a blocking context */
2501 if (force_nonblock
) {
2503 req
->work
.func
= io_fsync_finish
;
2507 work
= old_work
= &req
->work
;
2508 io_fsync_finish(&work
);
2509 if (work
&& work
!= old_work
)
2510 *nxt
= container_of(work
, struct io_kiocb
, work
);
2514 static void io_fallocate_finish(struct io_wq_work
**workptr
)
2516 struct io_kiocb
*req
= container_of(*workptr
, struct io_kiocb
, work
);
2517 struct io_kiocb
*nxt
= NULL
;
2520 if (io_req_cancelled(req
))
2523 current
->signal
->rlim
[RLIMIT_FSIZE
].rlim_cur
= req
->fsize
;
2524 ret
= vfs_fallocate(req
->file
, req
->sync
.mode
, req
->sync
.off
,
2526 current
->signal
->rlim
[RLIMIT_FSIZE
].rlim_cur
= RLIM_INFINITY
;
2528 req_set_fail_links(req
);
2529 io_cqring_add_event(req
, ret
);
2530 io_put_req_find_next(req
, &nxt
);
2532 io_wq_assign_next(workptr
, nxt
);
static int io_fallocate_prep(struct io_kiocb *req,
			     const struct io_uring_sqe *sqe)
{
	if (sqe->ioprio || sqe->buf_index || sqe->rw_flags)
		return -EINVAL;

	req->sync.off = READ_ONCE(sqe->off);
	req->sync.len = READ_ONCE(sqe->addr);
	req->sync.mode = READ_ONCE(sqe->len);
	req->fsize = rlimit(RLIMIT_FSIZE);
	return 0;
}
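/*
 * Note the SQE field reuse above: for IORING_OP_FALLOCATE the 64-bit addr
 * field carries the allocation length and the 32-bit len field carries the
 * fallocate mode. A hedged userspace sketch, assuming a liburing version
 * that provides io_uring_prep_fallocate() (illustrative only):
 *
 *	sqe = io_uring_get_sqe(&ring);
 *	io_uring_prep_fallocate(sqe, fd, 0, 0, 1 * 1024 * 1024);
 *	io_uring_submit(&ring);
 */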
static int io_fallocate(struct io_kiocb *req, struct io_kiocb **nxt,
			bool force_nonblock)
{
	struct io_wq_work *work, *old_work;

	/* fallocate always requires a blocking context */
	if (force_nonblock) {
		io_put_req(req);
		req->work.func = io_fallocate_finish;
		return -EAGAIN;
	}

	work = old_work = &req->work;
	io_fallocate_finish(&work);
	if (work && work != old_work)
		*nxt = container_of(work, struct io_kiocb, work);

	return 0;
}
static int io_openat_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	const char __user *fname;
	int ret;

	if (sqe->ioprio || sqe->buf_index)
		return -EINVAL;
	if (sqe->flags & IOSQE_FIXED_FILE)
		return -EBADF;
	if (req->flags & REQ_F_NEED_CLEANUP)
		return 0;

	req->open.dfd = READ_ONCE(sqe->fd);
	req->open.how.mode = READ_ONCE(sqe->len);
	fname = u64_to_user_ptr(READ_ONCE(sqe->addr));
	req->open.how.flags = READ_ONCE(sqe->open_flags);
	if (force_o_largefile())
		req->open.how.flags |= O_LARGEFILE;

	req->open.filename = getname(fname);
	if (IS_ERR(req->open.filename)) {
		ret = PTR_ERR(req->open.filename);
		req->open.filename = NULL;
		return ret;
	}

	req->open.nofile = rlimit(RLIMIT_NOFILE);
	req->flags |= REQ_F_NEED_CLEANUP;
	return 0;
}
static int io_openat2_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct open_how __user *how;
	const char __user *fname;
	size_t len;
	int ret;

	if (sqe->ioprio || sqe->buf_index)
		return -EINVAL;
	if (sqe->flags & IOSQE_FIXED_FILE)
		return -EBADF;
	if (req->flags & REQ_F_NEED_CLEANUP)
		return 0;

	req->open.dfd = READ_ONCE(sqe->fd);
	fname = u64_to_user_ptr(READ_ONCE(sqe->addr));
	how = u64_to_user_ptr(READ_ONCE(sqe->addr2));
	len = READ_ONCE(sqe->len);

	if (len < OPEN_HOW_SIZE_VER0)
		return -EINVAL;

	ret = copy_struct_from_user(&req->open.how, sizeof(req->open.how), how,
					len);
	if (ret)
		return ret;

	if (!(req->open.how.flags & O_PATH) && force_o_largefile())
		req->open.how.flags |= O_LARGEFILE;

	req->open.filename = getname(fname);
	if (IS_ERR(req->open.filename)) {
		ret = PTR_ERR(req->open.filename);
		req->open.filename = NULL;
		return ret;
	}

	req->open.nofile = rlimit(RLIMIT_NOFILE);
	req->flags |= REQ_F_NEED_CLEANUP;
	return 0;
}
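/*
 * openat2 passes its struct open_how by pointer (sqe->addr2) and size
 * (sqe->len). copy_struct_from_user() above accepts any size from
 * OPEN_HOW_SIZE_VER0 upward, zero-filling a shorter struct and rejecting
 * non-zero unknown trailing bytes, so newer userspace layouts degrade
 * gracefully. O_LARGEFILE is forced here mirroring the regular openat2(2)
 * syscall path, except for O_PATH opens.
 */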
static int io_openat2(struct io_kiocb *req, struct io_kiocb **nxt,
		      bool force_nonblock)
{
	struct open_flags op;
	struct file *file;
	int ret;

	if (force_nonblock)
		return -EAGAIN;

	ret = build_open_flags(&req->open.how, &op);
	if (ret)
		goto err;

	ret = __get_unused_fd_flags(req->open.how.flags, req->open.nofile);
	if (ret < 0)
		goto err;

	file = do_filp_open(req->open.dfd, req->open.filename, &op);
	if (IS_ERR(file)) {
		put_unused_fd(ret);
		ret = PTR_ERR(file);
	} else {
		fsnotify_open(file);
		fd_install(ret, file);
	}
err:
	putname(req->open.filename);
	req->flags &= ~REQ_F_NEED_CLEANUP;
	if (ret < 0)
		req_set_fail_links(req);
	io_cqring_add_event(req, ret);
	io_put_req_find_next(req, nxt);
	return 0;
}

static int io_openat(struct io_kiocb *req, struct io_kiocb **nxt,
		     bool force_nonblock)
{
	req->open.how = build_open_how(req->open.how.flags, req->open.how.mode);
	return io_openat2(req, nxt, force_nonblock);
}
static int io_epoll_ctl_prep(struct io_kiocb *req,
			     const struct io_uring_sqe *sqe)
{
#if defined(CONFIG_EPOLL)
	if (sqe->ioprio || sqe->buf_index)
		return -EINVAL;

	req->epoll.epfd = READ_ONCE(sqe->fd);
	req->epoll.op = READ_ONCE(sqe->len);
	req->epoll.fd = READ_ONCE(sqe->off);

	if (ep_op_has_event(req->epoll.op)) {
		struct epoll_event __user *ev;

		ev = u64_to_user_ptr(READ_ONCE(sqe->addr));
		if (copy_from_user(&req->epoll.event, ev, sizeof(*ev)))
			return -EFAULT;
	}

	return 0;
#else
	return -EOPNOTSUPP;
#endif
}

static int io_epoll_ctl(struct io_kiocb *req, struct io_kiocb **nxt,
			bool force_nonblock)
{
#if defined(CONFIG_EPOLL)
	struct io_epoll *ie = &req->epoll;
	int ret;

	ret = do_epoll_ctl(ie->epfd, ie->op, ie->fd, &ie->event, force_nonblock);
	if (force_nonblock && ret == -EAGAIN)
		return -EAGAIN;

	if (ret < 0)
		req_set_fail_links(req);
	io_cqring_add_event(req, ret);
	io_put_req_find_next(req, nxt);
	return 0;
#else
	return -EOPNOTSUPP;
#endif
}
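/*
 * do_epoll_ctl() is called with a nonblock flag matching force_nonblock:
 * if the epoll update would have to wait, the inline attempt bails with
 * -EAGAIN and the request is retried from an io-wq worker, where blocking
 * is allowed.
 */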
static int io_madvise_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
#if defined(CONFIG_ADVISE_SYSCALLS) && defined(CONFIG_MMU)
	if (sqe->ioprio || sqe->buf_index || sqe->off)
		return -EINVAL;

	req->madvise.addr = READ_ONCE(sqe->addr);
	req->madvise.len = READ_ONCE(sqe->len);
	req->madvise.advice = READ_ONCE(sqe->fadvise_advice);
	return 0;
#else
	return -EOPNOTSUPP;
#endif
}

static int io_madvise(struct io_kiocb *req, struct io_kiocb **nxt,
		      bool force_nonblock)
{
#if defined(CONFIG_ADVISE_SYSCALLS) && defined(CONFIG_MMU)
	struct io_madvise *ma = &req->madvise;
	int ret;

	if (force_nonblock)
		return -EAGAIN;

	ret = do_madvise(ma->addr, ma->len, ma->advice);
	if (ret < 0)
		req_set_fail_links(req);
	io_cqring_add_event(req, ret);
	io_put_req_find_next(req, nxt);
	return 0;
#else
	return -EOPNOTSUPP;
#endif
}
static int io_fadvise_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	if (sqe->ioprio || sqe->buf_index || sqe->addr)
		return -EINVAL;

	req->fadvise.offset = READ_ONCE(sqe->off);
	req->fadvise.len = READ_ONCE(sqe->len);
	req->fadvise.advice = READ_ONCE(sqe->fadvise_advice);
	return 0;
}

static int io_fadvise(struct io_kiocb *req, struct io_kiocb **nxt,
		      bool force_nonblock)
{
	struct io_fadvise *fa = &req->fadvise;
	int ret;

	if (force_nonblock) {
		switch (fa->advice) {
		case POSIX_FADV_NORMAL:
		case POSIX_FADV_RANDOM:
		case POSIX_FADV_SEQUENTIAL:
			break;
		default:
			return -EAGAIN;
		}
	}

	ret = vfs_fadvise(req->file, fa->offset, fa->len, fa->advice);
	if (ret < 0)
		req_set_fail_links(req);
	io_cqring_add_event(req, ret);
	io_put_req_find_next(req, nxt);
	return 0;
}
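/*
 * Only POSIX_FADV_NORMAL/RANDOM/SEQUENTIAL are serviced inline during a
 * non-blocking attempt: they only tweak per-file readahead state. The
 * remaining advice values (e.g. WILLNEED, DONTNEED) may touch the page
 * cache and block, so they take the -EAGAIN path and run from io-wq.
 */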
static int io_statx_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	const char __user *fname;
	unsigned lookup_flags;
	int ret;

	if (sqe->ioprio || sqe->buf_index)
		return -EINVAL;
	if (sqe->flags & IOSQE_FIXED_FILE)
		return -EBADF;
	if (req->flags & REQ_F_NEED_CLEANUP)
		return 0;

	req->open.dfd = READ_ONCE(sqe->fd);
	req->open.mask = READ_ONCE(sqe->len);
	fname = u64_to_user_ptr(READ_ONCE(sqe->addr));
	req->open.buffer = u64_to_user_ptr(READ_ONCE(sqe->addr2));
	req->open.how.flags = READ_ONCE(sqe->statx_flags);

	if (vfs_stat_set_lookup_flags(&lookup_flags, req->open.how.flags))
		return -EINVAL;

	req->open.filename = getname_flags(fname, lookup_flags, NULL);
	if (IS_ERR(req->open.filename)) {
		ret = PTR_ERR(req->open.filename);
		req->open.filename = NULL;
		return ret;
	}

	req->flags |= REQ_F_NEED_CLEANUP;
	return 0;
}
static int io_statx(struct io_kiocb *req, struct io_kiocb **nxt,
		    bool force_nonblock)
{
	struct io_open *ctx = &req->open;
	unsigned lookup_flags;
	struct path path;
	struct kstat stat;
	int ret;

	if (force_nonblock) {
		/* only need file table for an actual valid fd */
		if (ctx->dfd == -1 || ctx->dfd == AT_FDCWD)
			req->flags |= REQ_F_NO_FILE_TABLE;
		return -EAGAIN;
	}

	if (vfs_stat_set_lookup_flags(&lookup_flags, ctx->how.flags))
		return -EINVAL;

retry:
	/* filename_lookup() drops it, keep a reference */
	ctx->filename->refcnt++;

	ret = filename_lookup(ctx->dfd, ctx->filename, lookup_flags, &path,
				NULL);
	if (ret)
		goto err;

	ret = vfs_getattr(&path, &stat, ctx->mask, ctx->how.flags);
	path_put(&path);
	if (retry_estale(ret, lookup_flags)) {
		lookup_flags |= LOOKUP_REVAL;
		goto retry;
	}
	if (!ret)
		ret = cp_statx(&stat, ctx->buffer);
err:
	putname(ctx->filename);
	req->flags &= ~REQ_F_NEED_CLEANUP;
	if (ret < 0)
		req_set_fail_links(req);
	io_cqring_add_event(req, ret);
	io_put_req_find_next(req, nxt);
	return 0;
}
static int io_close_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	/*
	 * If we queue this for async, it must not be cancellable. That would
	 * leave the 'file' in an indeterminate state.
	 */
	req->work.flags |= IO_WQ_WORK_NO_CANCEL;

	if (sqe->ioprio || sqe->off || sqe->addr || sqe->len ||
	    sqe->rw_flags || sqe->buf_index)
		return -EINVAL;
	if (sqe->flags & IOSQE_FIXED_FILE)
		return -EBADF;

	req->close.fd = READ_ONCE(sqe->fd);
	if (req->file->f_op == &io_uring_fops ||
	    req->close.fd == req->ctx->ring_fd)
		return -EBADF;

	return 0;
}
/* only called when __close_fd_get_file() is done */
static void __io_close_finish(struct io_kiocb *req, struct io_kiocb **nxt)
{
	int ret;

	ret = filp_close(req->close.put_file, req->work.files);
	if (ret < 0)
		req_set_fail_links(req);
	io_cqring_add_event(req, ret);
	fput(req->close.put_file);
	io_put_req_find_next(req, nxt);
}

static void io_close_finish(struct io_wq_work **workptr)
{
	struct io_kiocb *req = container_of(*workptr, struct io_kiocb, work);
	struct io_kiocb *nxt = NULL;

	/* not cancellable, don't do io_req_cancelled() */
	__io_close_finish(req, &nxt);
	if (nxt)
		io_wq_assign_next(workptr, nxt);
}

static int io_close(struct io_kiocb *req, struct io_kiocb **nxt,
		    bool force_nonblock)
{
	int ret;

	req->close.put_file = NULL;
	ret = __close_fd_get_file(req->close.fd, &req->close.put_file);
	if (ret < 0)
		return ret;

	/* if the file has a flush method, be safe and punt to async */
	if (req->close.put_file->f_op->flush && !io_wq_current_is_worker())
		goto eagain;

	/*
	 * No ->flush(), safely close from here and just punt the
	 * fput() to async context.
	 */
	__io_close_finish(req, nxt);
	return 0;
eagain:
	req->work.func = io_close_finish;
	/*
	 * Do manual async queue here to avoid grabbing files - we don't
	 * need the files, and it'll cause io_close_finish() to close
	 * the file again and cause a double CQE entry for this request.
	 */
	io_queue_async_work(req);
	return 0;
}
static int io_prep_sfr(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_ring_ctx *ctx = req->ctx;

	if (unlikely(ctx->flags & IORING_SETUP_IOPOLL))
		return -EINVAL;
	if (unlikely(sqe->addr || sqe->ioprio || sqe->buf_index))
		return -EINVAL;

	req->sync.off = READ_ONCE(sqe->off);
	req->sync.len = READ_ONCE(sqe->len);
	req->sync.flags = READ_ONCE(sqe->sync_range_flags);
	return 0;
}
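/*
 * For IORING_OP_SYNC_FILE_RANGE the mapping is direct: sqe->off and sqe->len
 * give the byte range and sqe->sync_range_flags carries the
 * SYNC_FILE_RANGE_* flags, which io_sync_file_range_finish() below hands
 * straight to sync_file_range().
 */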
static void io_sync_file_range_finish(struct io_wq_work **workptr)
{
	struct io_kiocb *req = container_of(*workptr, struct io_kiocb, work);
	struct io_kiocb *nxt = NULL;
	int ret;

	if (io_req_cancelled(req))
		return;

	ret = sync_file_range(req->file, req->sync.off, req->sync.len,
				req->sync.flags);
	if (ret < 0)
		req_set_fail_links(req);
	io_cqring_add_event(req, ret);
	io_put_req_find_next(req, &nxt);
	if (nxt)
		io_wq_assign_next(workptr, nxt);
}

static int io_sync_file_range(struct io_kiocb *req, struct io_kiocb **nxt,
			      bool force_nonblock)
{
	struct io_wq_work *work, *old_work;

	/* sync_file_range always requires a blocking context */
	if (force_nonblock) {
		io_put_req(req);
		req->work.func = io_sync_file_range_finish;
		return -EAGAIN;
	}

	work = old_work = &req->work;
	io_sync_file_range_finish(&work);
	if (work && work != old_work)
		*nxt = container_of(work, struct io_kiocb, work);
	return 0;
}
static int io_sendmsg_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
#if defined(CONFIG_NET)
	struct io_sr_msg *sr = &req->sr_msg;
	struct io_async_ctx *io = req->io;
	int ret;

	sr->msg_flags = READ_ONCE(sqe->msg_flags);
	sr->msg = u64_to_user_ptr(READ_ONCE(sqe->addr));
	sr->len = READ_ONCE(sqe->len);

#ifdef CONFIG_COMPAT
	if (req->ctx->compat)
		sr->msg_flags |= MSG_CMSG_COMPAT;
#endif

	if (!io || req->opcode == IORING_OP_SEND)
		return 0;
	/* iovec is already imported */
	if (req->flags & REQ_F_NEED_CLEANUP)
		return 0;

	io->msg.iov = io->msg.fast_iov;
	ret = sendmsg_copy_msghdr(&io->msg.msg, sr->msg, sr->msg_flags,
					&io->msg.iov);
	if (!ret)
		req->flags |= REQ_F_NEED_CLEANUP;
	return ret;
#else
	return -EOPNOTSUPP;
#endif
}
3047 static int io_sendmsg(struct io_kiocb
*req
, struct io_kiocb
**nxt
,
3048 bool force_nonblock
)
3050 #if defined(CONFIG_NET)
3051 struct io_async_msghdr
*kmsg
= NULL
;
3052 struct socket
*sock
;
3055 if (unlikely(req
->ctx
->flags
& IORING_SETUP_IOPOLL
))
3058 sock
= sock_from_file(req
->file
, &ret
);
3060 struct io_async_ctx io
;
3064 kmsg
= &req
->io
->msg
;
3065 kmsg
->msg
.msg_name
= &req
->io
->msg
.addr
;
3066 /* if iov is set, it's allocated already */
3068 kmsg
->iov
= kmsg
->fast_iov
;
3069 kmsg
->msg
.msg_iter
.iov
= kmsg
->iov
;
3071 struct io_sr_msg
*sr
= &req
->sr_msg
;
3074 kmsg
->msg
.msg_name
= &io
.msg
.addr
;
3076 io
.msg
.iov
= io
.msg
.fast_iov
;
3077 ret
= sendmsg_copy_msghdr(&io
.msg
.msg
, sr
->msg
,
3078 sr
->msg_flags
, &io
.msg
.iov
);
3083 flags
= req
->sr_msg
.msg_flags
;
3084 if (flags
& MSG_DONTWAIT
)
3085 req
->flags
|= REQ_F_NOWAIT
;
3086 else if (force_nonblock
)
3087 flags
|= MSG_DONTWAIT
;
3089 ret
= __sys_sendmsg_sock(sock
, &kmsg
->msg
, flags
);
3090 if (force_nonblock
&& ret
== -EAGAIN
) {
3093 if (io_alloc_async_ctx(req
)) {
3094 if (kmsg
->iov
!= kmsg
->fast_iov
)
3098 req
->flags
|= REQ_F_NEED_CLEANUP
;
3099 memcpy(&req
->io
->msg
, &io
.msg
, sizeof(io
.msg
));
3102 if (ret
== -ERESTARTSYS
)
3106 if (kmsg
&& kmsg
->iov
!= kmsg
->fast_iov
)
3108 req
->flags
&= ~REQ_F_NEED_CLEANUP
;
3109 io_cqring_add_event(req
, ret
);
3111 req_set_fail_links(req
);
3112 io_put_req_find_next(req
, nxt
);
3119 static int io_send(struct io_kiocb
*req
, struct io_kiocb
**nxt
,
3120 bool force_nonblock
)
3122 #if defined(CONFIG_NET)
3123 struct socket
*sock
;
3126 if (unlikely(req
->ctx
->flags
& IORING_SETUP_IOPOLL
))
3129 sock
= sock_from_file(req
->file
, &ret
);
3131 struct io_sr_msg
*sr
= &req
->sr_msg
;
3136 ret
= import_single_range(WRITE
, sr
->buf
, sr
->len
, &iov
,
3141 msg
.msg_name
= NULL
;
3142 msg
.msg_control
= NULL
;
3143 msg
.msg_controllen
= 0;
3144 msg
.msg_namelen
= 0;
3146 flags
= req
->sr_msg
.msg_flags
;
3147 if (flags
& MSG_DONTWAIT
)
3148 req
->flags
|= REQ_F_NOWAIT
;
3149 else if (force_nonblock
)
3150 flags
|= MSG_DONTWAIT
;
3152 msg
.msg_flags
= flags
;
3153 ret
= sock_sendmsg(sock
, &msg
);
3154 if (force_nonblock
&& ret
== -EAGAIN
)
3156 if (ret
== -ERESTARTSYS
)
3160 io_cqring_add_event(req
, ret
);
3162 req_set_fail_links(req
);
3163 io_put_req_find_next(req
, nxt
);
3170 static int io_recvmsg_prep(struct io_kiocb
*req
,
3171 const struct io_uring_sqe
*sqe
)
3173 #if defined(CONFIG_NET)
3174 struct io_sr_msg
*sr
= &req
->sr_msg
;
3175 struct io_async_ctx
*io
= req
->io
;
3178 sr
->msg_flags
= READ_ONCE(sqe
->msg_flags
);
3179 sr
->msg
= u64_to_user_ptr(READ_ONCE(sqe
->addr
));
3180 sr
->len
= READ_ONCE(sqe
->len
);
3182 #ifdef CONFIG_COMPAT
3183 if (req
->ctx
->compat
)
3184 sr
->msg_flags
|= MSG_CMSG_COMPAT
;
3187 if (!io
|| req
->opcode
== IORING_OP_RECV
)
3189 /* iovec is already imported */
3190 if (req
->flags
& REQ_F_NEED_CLEANUP
)
3193 io
->msg
.iov
= io
->msg
.fast_iov
;
3194 ret
= recvmsg_copy_msghdr(&io
->msg
.msg
, sr
->msg
, sr
->msg_flags
,
3195 &io
->msg
.uaddr
, &io
->msg
.iov
);
3197 req
->flags
|= REQ_F_NEED_CLEANUP
;
3204 static int io_recvmsg(struct io_kiocb
*req
, struct io_kiocb
**nxt
,
3205 bool force_nonblock
)
3207 #if defined(CONFIG_NET)
3208 struct io_async_msghdr
*kmsg
= NULL
;
3209 struct socket
*sock
;
3212 if (unlikely(req
->ctx
->flags
& IORING_SETUP_IOPOLL
))
3215 sock
= sock_from_file(req
->file
, &ret
);
3217 struct io_async_ctx io
;
3221 kmsg
= &req
->io
->msg
;
3222 kmsg
->msg
.msg_name
= &req
->io
->msg
.addr
;
3223 /* if iov is set, it's allocated already */
3225 kmsg
->iov
= kmsg
->fast_iov
;
3226 kmsg
->msg
.msg_iter
.iov
= kmsg
->iov
;
3228 struct io_sr_msg
*sr
= &req
->sr_msg
;
3231 kmsg
->msg
.msg_name
= &io
.msg
.addr
;
3233 io
.msg
.iov
= io
.msg
.fast_iov
;
3234 ret
= recvmsg_copy_msghdr(&io
.msg
.msg
, sr
->msg
,
3235 sr
->msg_flags
, &io
.msg
.uaddr
,
3241 flags
= req
->sr_msg
.msg_flags
;
3242 if (flags
& MSG_DONTWAIT
)
3243 req
->flags
|= REQ_F_NOWAIT
;
3244 else if (force_nonblock
)
3245 flags
|= MSG_DONTWAIT
;
3247 ret
= __sys_recvmsg_sock(sock
, &kmsg
->msg
, req
->sr_msg
.msg
,
3248 kmsg
->uaddr
, flags
);
3249 if (force_nonblock
&& ret
== -EAGAIN
) {
3252 if (io_alloc_async_ctx(req
)) {
3253 if (kmsg
->iov
!= kmsg
->fast_iov
)
3257 memcpy(&req
->io
->msg
, &io
.msg
, sizeof(io
.msg
));
3258 req
->flags
|= REQ_F_NEED_CLEANUP
;
3261 if (ret
== -ERESTARTSYS
)
3265 if (kmsg
&& kmsg
->iov
!= kmsg
->fast_iov
)
3267 req
->flags
&= ~REQ_F_NEED_CLEANUP
;
3268 io_cqring_add_event(req
, ret
);
3270 req_set_fail_links(req
);
3271 io_put_req_find_next(req
, nxt
);
3278 static int io_recv(struct io_kiocb
*req
, struct io_kiocb
**nxt
,
3279 bool force_nonblock
)
3281 #if defined(CONFIG_NET)
3282 struct socket
*sock
;
3285 if (unlikely(req
->ctx
->flags
& IORING_SETUP_IOPOLL
))
3288 sock
= sock_from_file(req
->file
, &ret
);
3290 struct io_sr_msg
*sr
= &req
->sr_msg
;
3295 ret
= import_single_range(READ
, sr
->buf
, sr
->len
, &iov
,
3300 msg
.msg_name
= NULL
;
3301 msg
.msg_control
= NULL
;
3302 msg
.msg_controllen
= 0;
3303 msg
.msg_namelen
= 0;
3304 msg
.msg_iocb
= NULL
;
3307 flags
= req
->sr_msg
.msg_flags
;
3308 if (flags
& MSG_DONTWAIT
)
3309 req
->flags
|= REQ_F_NOWAIT
;
3310 else if (force_nonblock
)
3311 flags
|= MSG_DONTWAIT
;
3313 ret
= sock_recvmsg(sock
, &msg
, flags
);
3314 if (force_nonblock
&& ret
== -EAGAIN
)
3316 if (ret
== -ERESTARTSYS
)
3320 io_cqring_add_event(req
, ret
);
3322 req_set_fail_links(req
);
3323 io_put_req_find_next(req
, nxt
);
static int io_accept_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
#if defined(CONFIG_NET)
	struct io_accept *accept = &req->accept;

	if (unlikely(req->ctx->flags & (IORING_SETUP_IOPOLL|IORING_SETUP_SQPOLL)))
		return -EINVAL;
	if (sqe->ioprio || sqe->len || sqe->buf_index)
		return -EINVAL;

	accept->addr = u64_to_user_ptr(READ_ONCE(sqe->addr));
	accept->addr_len = u64_to_user_ptr(READ_ONCE(sqe->addr2));
	accept->flags = READ_ONCE(sqe->accept_flags);
	accept->nofile = rlimit(RLIMIT_NOFILE);
	return 0;
#else
	return -EOPNOTSUPP;
#endif
}
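/*
 * For IORING_OP_ACCEPT both pointers live in the SQE: sqe->addr is the
 * userspace struct sockaddr and sqe->addr2 the socklen_t pointer, matching
 * the accept4(2) calling convention, while sqe->accept_flags carries the
 * SOCK_NONBLOCK/SOCK_CLOEXEC style flags. RLIMIT_NOFILE is sampled at prep
 * time so a later async execution is charged against the submitting task's
 * limit.
 */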
3351 #if defined(CONFIG_NET)
3352 static int __io_accept(struct io_kiocb
*req
, struct io_kiocb
**nxt
,
3353 bool force_nonblock
)
3355 struct io_accept
*accept
= &req
->accept
;
3356 unsigned file_flags
;
3359 file_flags
= force_nonblock
? O_NONBLOCK
: 0;
3360 ret
= __sys_accept4_file(req
->file
, file_flags
, accept
->addr
,
3361 accept
->addr_len
, accept
->flags
,
3363 if (ret
== -EAGAIN
&& force_nonblock
)
3365 if (ret
== -ERESTARTSYS
)
3368 req_set_fail_links(req
);
3369 io_cqring_add_event(req
, ret
);
3370 io_put_req_find_next(req
, nxt
);
3374 static void io_accept_finish(struct io_wq_work
**workptr
)
3376 struct io_kiocb
*req
= container_of(*workptr
, struct io_kiocb
, work
);
3377 struct io_kiocb
*nxt
= NULL
;
3379 if (io_req_cancelled(req
))
3381 __io_accept(req
, &nxt
, false);
3383 io_wq_assign_next(workptr
, nxt
);
3387 static int io_accept(struct io_kiocb
*req
, struct io_kiocb
**nxt
,
3388 bool force_nonblock
)
3390 #if defined(CONFIG_NET)
3393 ret
= __io_accept(req
, nxt
, force_nonblock
);
3394 if (ret
== -EAGAIN
&& force_nonblock
) {
3395 req
->work
.func
= io_accept_finish
;
static int io_connect_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
#if defined(CONFIG_NET)
	struct io_connect *conn = &req->connect;
	struct io_async_ctx *io = req->io;

	if (unlikely(req->ctx->flags & (IORING_SETUP_IOPOLL|IORING_SETUP_SQPOLL)))
		return -EINVAL;
	if (sqe->ioprio || sqe->len || sqe->buf_index || sqe->rw_flags)
		return -EINVAL;

	conn->addr = u64_to_user_ptr(READ_ONCE(sqe->addr));
	conn->addr_len = READ_ONCE(sqe->addr2);

	if (!io)
		return 0;

	return move_addr_to_kernel(conn->addr, conn->addr_len,
					&io->connect.address);
#else
	return -EOPNOTSUPP;
#endif
}
3429 static int io_connect(struct io_kiocb
*req
, struct io_kiocb
**nxt
,
3430 bool force_nonblock
)
3432 #if defined(CONFIG_NET)
3433 struct io_async_ctx __io
, *io
;
3434 unsigned file_flags
;
3440 ret
= move_addr_to_kernel(req
->connect
.addr
,
3441 req
->connect
.addr_len
,
3442 &__io
.connect
.address
);
3448 file_flags
= force_nonblock
? O_NONBLOCK
: 0;
3450 ret
= __sys_connect_file(req
->file
, &io
->connect
.address
,
3451 req
->connect
.addr_len
, file_flags
);
3452 if ((ret
== -EAGAIN
|| ret
== -EINPROGRESS
) && force_nonblock
) {
3455 if (io_alloc_async_ctx(req
)) {
3459 memcpy(&req
->io
->connect
, &__io
.connect
, sizeof(__io
.connect
));
3462 if (ret
== -ERESTARTSYS
)
3466 req_set_fail_links(req
);
3467 io_cqring_add_event(req
, ret
);
3468 io_put_req_find_next(req
, nxt
);
static void io_poll_remove_one(struct io_kiocb *req)
{
	struct io_poll_iocb *poll = &req->poll;

	spin_lock(&poll->head->lock);
	WRITE_ONCE(poll->canceled, true);
	if (!list_empty(&poll->wait.entry)) {
		list_del_init(&poll->wait.entry);
		io_queue_async_work(req);
	}
	spin_unlock(&poll->head->lock);
	hash_del(&req->hash_node);
}
static void io_poll_remove_all(struct io_ring_ctx *ctx)
{
	struct hlist_node *tmp;
	struct io_kiocb *req;
	int i;

	spin_lock_irq(&ctx->completion_lock);
	for (i = 0; i < (1U << ctx->cancel_hash_bits); i++) {
		struct hlist_head *list;

		list = &ctx->cancel_hash[i];
		hlist_for_each_entry_safe(req, tmp, list, hash_node)
			io_poll_remove_one(req);
	}
	spin_unlock_irq(&ctx->completion_lock);
}

static int io_poll_cancel(struct io_ring_ctx *ctx, __u64 sqe_addr)
{
	struct hlist_head *list;
	struct io_kiocb *req;

	list = &ctx->cancel_hash[hash_long(sqe_addr, ctx->cancel_hash_bits)];
	hlist_for_each_entry(req, list, hash_node) {
		if (sqe_addr == req->user_data) {
			io_poll_remove_one(req);
			return 0;
		}
	}

	return -ENOENT;
}
static int io_poll_remove_prep(struct io_kiocb *req,
			       const struct io_uring_sqe *sqe)
{
	if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
		return -EINVAL;
	if (sqe->ioprio || sqe->off || sqe->len || sqe->buf_index ||
	    sqe->poll_events)
		return -EINVAL;

	req->poll.addr = READ_ONCE(sqe->addr);
	return 0;
}

/*
 * Find a running poll command that matches one specified in sqe->addr,
 * and remove it if found.
 */
static int io_poll_remove(struct io_kiocb *req)
{
	struct io_ring_ctx *ctx = req->ctx;
	u64 addr;
	int ret;

	addr = req->poll.addr;
	spin_lock_irq(&ctx->completion_lock);
	ret = io_poll_cancel(ctx, addr);
	spin_unlock_irq(&ctx->completion_lock);

	io_cqring_add_event(req, ret);
	if (ret < 0)
		req_set_fail_links(req);
	io_put_req(req);
	return 0;
}
static void io_poll_complete(struct io_kiocb *req, __poll_t mask, int error)
{
	struct io_ring_ctx *ctx = req->ctx;

	req->poll.done = true;
	if (error)
		io_cqring_fill_event(req, error);
	else
		io_cqring_fill_event(req, mangle_poll(mask));
	io_commit_cqring(ctx);
}
3569 static void io_poll_complete_work(struct io_wq_work
**workptr
)
3571 struct io_wq_work
*work
= *workptr
;
3572 struct io_kiocb
*req
= container_of(work
, struct io_kiocb
, work
);
3573 struct io_poll_iocb
*poll
= &req
->poll
;
3574 struct poll_table_struct pt
= { ._key
= poll
->events
};
3575 struct io_ring_ctx
*ctx
= req
->ctx
;
3576 struct io_kiocb
*nxt
= NULL
;
3580 if (work
->flags
& IO_WQ_WORK_CANCEL
) {
3581 WRITE_ONCE(poll
->canceled
, true);
3583 } else if (READ_ONCE(poll
->canceled
)) {
3587 if (ret
!= -ECANCELED
)
3588 mask
= vfs_poll(poll
->file
, &pt
) & poll
->events
;
3591 * Note that ->ki_cancel callers also delete iocb from active_reqs after
3592 * calling ->ki_cancel. We need the ctx_lock roundtrip here to
3593 * synchronize with them. In the cancellation case the list_del_init
3594 * itself is not actually needed, but harmless so we keep it in to
3595 * avoid further branches in the fast path.
3597 spin_lock_irq(&ctx
->completion_lock
);
3598 if (!mask
&& ret
!= -ECANCELED
) {
3599 add_wait_queue(poll
->head
, &poll
->wait
);
3600 spin_unlock_irq(&ctx
->completion_lock
);
3603 hash_del(&req
->hash_node
);
3604 io_poll_complete(req
, mask
, ret
);
3605 spin_unlock_irq(&ctx
->completion_lock
);
3607 io_cqring_ev_posted(ctx
);
3610 req_set_fail_links(req
);
3611 io_put_req_find_next(req
, &nxt
);
3613 io_wq_assign_next(workptr
, nxt
);
3616 static void __io_poll_flush(struct io_ring_ctx
*ctx
, struct llist_node
*nodes
)
3618 struct io_kiocb
*req
, *tmp
;
3619 struct req_batch rb
;
3621 rb
.to_free
= rb
.need_iter
= 0;
3622 spin_lock_irq(&ctx
->completion_lock
);
3623 llist_for_each_entry_safe(req
, tmp
, nodes
, llist_node
) {
3624 hash_del(&req
->hash_node
);
3625 io_poll_complete(req
, req
->result
, 0);
3627 if (refcount_dec_and_test(&req
->refs
) &&
3628 !io_req_multi_free(&rb
, req
)) {
3629 req
->flags
|= REQ_F_COMP_LOCKED
;
3633 spin_unlock_irq(&ctx
->completion_lock
);
3635 io_cqring_ev_posted(ctx
);
3636 io_free_req_many(ctx
, &rb
);
3639 static void io_poll_flush(struct io_wq_work
**workptr
)
3641 struct io_kiocb
*req
= container_of(*workptr
, struct io_kiocb
, work
);
3642 struct llist_node
*nodes
;
3644 nodes
= llist_del_all(&req
->ctx
->poll_llist
);
3646 __io_poll_flush(req
->ctx
, nodes
);
3649 static void io_poll_trigger_evfd(struct io_wq_work
**workptr
)
3651 struct io_kiocb
*req
= container_of(*workptr
, struct io_kiocb
, work
);
3653 eventfd_signal(req
->ctx
->cq_ev_fd
, 1);
3657 static int io_poll_wake(struct wait_queue_entry
*wait
, unsigned mode
, int sync
,
3660 struct io_poll_iocb
*poll
= wait
->private;
3661 struct io_kiocb
*req
= container_of(poll
, struct io_kiocb
, poll
);
3662 struct io_ring_ctx
*ctx
= req
->ctx
;
3663 __poll_t mask
= key_to_poll(key
);
3665 /* for instances that support it check for an event match first: */
3666 if (mask
&& !(mask
& poll
->events
))
3669 list_del_init(&poll
->wait
.entry
);
	 * Run completion inline if we can. We're using trylock here because
	 * we are violating the completion_lock -> poll wq lock ordering.
	 * If we have a link timeout we're going to need the completion_lock
	 * for finalizing the request; mark us as having grabbed that already.
3678 unsigned long flags
;
3680 if (llist_empty(&ctx
->poll_llist
) &&
3681 spin_trylock_irqsave(&ctx
->completion_lock
, flags
)) {
3684 hash_del(&req
->hash_node
);
3685 io_poll_complete(req
, mask
, 0);
3687 trigger_ev
= io_should_trigger_evfd(ctx
);
3688 if (trigger_ev
&& eventfd_signal_count()) {
3690 req
->work
.func
= io_poll_trigger_evfd
;
3692 req
->flags
|= REQ_F_COMP_LOCKED
;
3696 spin_unlock_irqrestore(&ctx
->completion_lock
, flags
);
3697 __io_cqring_ev_posted(ctx
, trigger_ev
);
3700 req
->llist_node
.next
= NULL
;
3701 /* if the list wasn't empty, we're done */
3702 if (!llist_add(&req
->llist_node
, &ctx
->poll_llist
))
3705 req
->work
.func
= io_poll_flush
;
3709 io_queue_async_work(req
);
3714 struct io_poll_table
{
3715 struct poll_table_struct pt
;
3716 struct io_kiocb
*req
;
3720 static void io_poll_queue_proc(struct file
*file
, struct wait_queue_head
*head
,
3721 struct poll_table_struct
*p
)
3723 struct io_poll_table
*pt
= container_of(p
, struct io_poll_table
, pt
);
3725 if (unlikely(pt
->req
->poll
.head
)) {
3726 pt
->error
= -EINVAL
;
3731 pt
->req
->poll
.head
= head
;
3732 add_wait_queue(head
, &pt
->req
->poll
.wait
);
3735 static void io_poll_req_insert(struct io_kiocb
*req
)
3737 struct io_ring_ctx
*ctx
= req
->ctx
;
3738 struct hlist_head
*list
;
3740 list
= &ctx
->cancel_hash
[hash_long(req
->user_data
, ctx
->cancel_hash_bits
)];
3741 hlist_add_head(&req
->hash_node
, list
);
static int io_poll_add_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_poll_iocb *poll = &req->poll;
	u16 events;

	if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
		return -EINVAL;
	if (sqe->addr || sqe->ioprio || sqe->off || sqe->len || sqe->buf_index)
		return -EINVAL;
	if (!poll->file)
		return -EBADF;

	events = READ_ONCE(sqe->poll_events);
	poll->events = demangle_poll(events) | EPOLLERR | EPOLLHUP;
	return 0;
}
3761 static int io_poll_add(struct io_kiocb
*req
, struct io_kiocb
**nxt
)
3763 struct io_poll_iocb
*poll
= &req
->poll
;
3764 struct io_ring_ctx
*ctx
= req
->ctx
;
3765 struct io_poll_table ipt
;
3766 bool cancel
= false;
3769 INIT_IO_WORK(&req
->work
, io_poll_complete_work
);
3770 INIT_HLIST_NODE(&req
->hash_node
);
3774 poll
->canceled
= false;
3776 ipt
.pt
._qproc
= io_poll_queue_proc
;
3777 ipt
.pt
._key
= poll
->events
;
3779 ipt
.error
= -EINVAL
; /* same as no support for IOCB_CMD_POLL */
3781 /* initialized the list so that we can do list_empty checks */
3782 INIT_LIST_HEAD(&poll
->wait
.entry
);
3783 init_waitqueue_func_entry(&poll
->wait
, io_poll_wake
);
3784 poll
->wait
.private = poll
;
3786 INIT_LIST_HEAD(&req
->list
);
3788 mask
= vfs_poll(poll
->file
, &ipt
.pt
) & poll
->events
;
3790 spin_lock_irq(&ctx
->completion_lock
);
3791 if (likely(poll
->head
)) {
3792 spin_lock(&poll
->head
->lock
);
3793 if (unlikely(list_empty(&poll
->wait
.entry
))) {
3799 if (mask
|| ipt
.error
)
3800 list_del_init(&poll
->wait
.entry
);
3802 WRITE_ONCE(poll
->canceled
, true);
3803 else if (!poll
->done
) /* actually waiting for an event */
3804 io_poll_req_insert(req
);
3805 spin_unlock(&poll
->head
->lock
);
3807 if (mask
) { /* no async, we'd stolen it */
3809 io_poll_complete(req
, mask
, 0);
3811 spin_unlock_irq(&ctx
->completion_lock
);
3814 io_cqring_ev_posted(ctx
);
3815 io_put_req_find_next(req
, nxt
);
3820 static enum hrtimer_restart
io_timeout_fn(struct hrtimer
*timer
)
3822 struct io_timeout_data
*data
= container_of(timer
,
3823 struct io_timeout_data
, timer
);
3824 struct io_kiocb
*req
= data
->req
;
3825 struct io_ring_ctx
*ctx
= req
->ctx
;
3826 unsigned long flags
;
3828 atomic_inc(&ctx
->cq_timeouts
);
3830 spin_lock_irqsave(&ctx
->completion_lock
, flags
);
3832 * We could be racing with timeout deletion. If the list is empty,
3833 * then timeout lookup already found it and will be handling it.
3835 if (!list_empty(&req
->list
)) {
3836 struct io_kiocb
*prev
;
		 * Adjust the sequence of requests queued before the current
		 * one: this request will consume a slot in the cq_ring and
		 * advance the cq_tail pointer, so without the adjustment
		 * other timeout requests may complete early without waiting
		 * for enough wait_nr.
3845 list_for_each_entry_continue_reverse(prev
, &ctx
->timeout_list
, list
)
3847 list_del_init(&req
->list
);
3850 io_cqring_fill_event(req
, -ETIME
);
3851 io_commit_cqring(ctx
);
3852 spin_unlock_irqrestore(&ctx
->completion_lock
, flags
);
3854 io_cqring_ev_posted(ctx
);
3855 req_set_fail_links(req
);
3857 return HRTIMER_NORESTART
;
3860 static int io_timeout_cancel(struct io_ring_ctx
*ctx
, __u64 user_data
)
3862 struct io_kiocb
*req
;
3865 list_for_each_entry(req
, &ctx
->timeout_list
, list
) {
3866 if (user_data
== req
->user_data
) {
3867 list_del_init(&req
->list
);
3876 ret
= hrtimer_try_to_cancel(&req
->io
->timeout
.timer
);
3880 req_set_fail_links(req
);
3881 io_cqring_fill_event(req
, -ECANCELED
);
3886 static int io_timeout_remove_prep(struct io_kiocb
*req
,
3887 const struct io_uring_sqe
*sqe
)
3889 if (unlikely(req
->ctx
->flags
& IORING_SETUP_IOPOLL
))
3891 if (sqe
->flags
|| sqe
->ioprio
|| sqe
->buf_index
|| sqe
->len
)
3894 req
->timeout
.addr
= READ_ONCE(sqe
->addr
);
3895 req
->timeout
.flags
= READ_ONCE(sqe
->timeout_flags
);
3896 if (req
->timeout
.flags
)
3903 * Remove or update an existing timeout command
3905 static int io_timeout_remove(struct io_kiocb
*req
)
3907 struct io_ring_ctx
*ctx
= req
->ctx
;
3910 spin_lock_irq(&ctx
->completion_lock
);
3911 ret
= io_timeout_cancel(ctx
, req
->timeout
.addr
);
3913 io_cqring_fill_event(req
, ret
);
3914 io_commit_cqring(ctx
);
3915 spin_unlock_irq(&ctx
->completion_lock
);
3916 io_cqring_ev_posted(ctx
);
3918 req_set_fail_links(req
);
3923 static int io_timeout_prep(struct io_kiocb
*req
, const struct io_uring_sqe
*sqe
,
3924 bool is_timeout_link
)
3926 struct io_timeout_data
*data
;
3929 if (unlikely(req
->ctx
->flags
& IORING_SETUP_IOPOLL
))
3931 if (sqe
->ioprio
|| sqe
->buf_index
|| sqe
->len
!= 1)
3933 if (sqe
->off
&& is_timeout_link
)
3935 flags
= READ_ONCE(sqe
->timeout_flags
);
3936 if (flags
& ~IORING_TIMEOUT_ABS
)
3939 req
->timeout
.count
= READ_ONCE(sqe
->off
);
3941 if (!req
->io
&& io_alloc_async_ctx(req
))
3944 data
= &req
->io
->timeout
;
3946 req
->flags
|= REQ_F_TIMEOUT
;
3948 if (get_timespec64(&data
->ts
, u64_to_user_ptr(sqe
->addr
)))
3951 if (flags
& IORING_TIMEOUT_ABS
)
3952 data
->mode
= HRTIMER_MODE_ABS
;
3954 data
->mode
= HRTIMER_MODE_REL
;
3956 hrtimer_init(&data
->timer
, CLOCK_MONOTONIC
, data
->mode
);
3960 static int io_timeout(struct io_kiocb
*req
)
3963 struct io_ring_ctx
*ctx
= req
->ctx
;
3964 struct io_timeout_data
*data
;
3965 struct list_head
*entry
;
3968 data
= &req
->io
->timeout
;
	 * sqe->off holds how many events need to occur for this
	 * timeout event to be satisfied. If it isn't set, then this is
	 * a pure timeout request and the sequence isn't used.
3975 count
= req
->timeout
.count
;
3977 req
->flags
|= REQ_F_TIMEOUT_NOSEQ
;
3978 spin_lock_irq(&ctx
->completion_lock
);
3979 entry
= ctx
->timeout_list
.prev
;
3983 req
->sequence
= ctx
->cached_sq_head
+ count
- 1;
3984 data
->seq_offset
= count
;
3987 * Insertion sort, ensuring the first entry in the list is always
3988 * the one we need first.
3990 spin_lock_irq(&ctx
->completion_lock
);
3991 list_for_each_prev(entry
, &ctx
->timeout_list
) {
3992 struct io_kiocb
*nxt
= list_entry(entry
, struct io_kiocb
, list
);
3993 unsigned nxt_sq_head
;
3994 long long tmp
, tmp_nxt
;
3995 u32 nxt_offset
= nxt
->io
->timeout
.seq_offset
;
3997 if (nxt
->flags
& REQ_F_TIMEOUT_NOSEQ
)
4001 * Since cached_sq_head + count - 1 can overflow, use type long
4004 tmp
= (long long)ctx
->cached_sq_head
+ count
- 1;
4005 nxt_sq_head
= nxt
->sequence
- nxt_offset
+ 1;
4006 tmp_nxt
= (long long)nxt_sq_head
+ nxt_offset
- 1;
		 * cached_sq_head may overflow, but it will never overflow
		 * twice while there is still a valid timeout request pending.
4012 if (ctx
->cached_sq_head
< nxt_sq_head
)
	 * The sequence of the requests after the inserted one, and of the
	 * inserted one itself, must be adjusted because each timeout request
	 * consumes a slot.
4025 req
->sequence
-= span
;
4027 list_add(&req
->list
, entry
);
4028 data
->timer
.function
= io_timeout_fn
;
4029 hrtimer_start(&data
->timer
, timespec64_to_ktime(data
->ts
), data
->mode
);
4030 spin_unlock_irq(&ctx
->completion_lock
);
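/*
 * A note on the insertion above: timeout requests are kept sorted by the
 * sequence at which they should fire (cached_sq_head + off - 1). Because
 * that sequence is computed from a 32-bit head that can wrap, the comparison
 * is done in a wider type, and the "overflows at most once" observation in
 * the loop is what keeps the ordering well defined across wraparound.
 */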
4034 static bool io_cancel_cb(struct io_wq_work
*work
, void *data
)
4036 struct io_kiocb
*req
= container_of(work
, struct io_kiocb
, work
);
4038 return req
->user_data
== (unsigned long) data
;
4041 static int io_async_cancel_one(struct io_ring_ctx
*ctx
, void *sqe_addr
)
4043 enum io_wq_cancel cancel_ret
;
4046 cancel_ret
= io_wq_cancel_cb(ctx
->io_wq
, io_cancel_cb
, sqe_addr
);
4047 switch (cancel_ret
) {
4048 case IO_WQ_CANCEL_OK
:
4051 case IO_WQ_CANCEL_RUNNING
:
4054 case IO_WQ_CANCEL_NOTFOUND
:
4062 static void io_async_find_and_cancel(struct io_ring_ctx
*ctx
,
4063 struct io_kiocb
*req
, __u64 sqe_addr
,
4064 struct io_kiocb
**nxt
, int success_ret
)
4066 unsigned long flags
;
4069 ret
= io_async_cancel_one(ctx
, (void *) (unsigned long) sqe_addr
);
4070 if (ret
!= -ENOENT
) {
4071 spin_lock_irqsave(&ctx
->completion_lock
, flags
);
4075 spin_lock_irqsave(&ctx
->completion_lock
, flags
);
4076 ret
= io_timeout_cancel(ctx
, sqe_addr
);
4079 ret
= io_poll_cancel(ctx
, sqe_addr
);
4083 io_cqring_fill_event(req
, ret
);
4084 io_commit_cqring(ctx
);
4085 spin_unlock_irqrestore(&ctx
->completion_lock
, flags
);
4086 io_cqring_ev_posted(ctx
);
4089 req_set_fail_links(req
);
4090 io_put_req_find_next(req
, nxt
);
4093 static int io_async_cancel_prep(struct io_kiocb
*req
,
4094 const struct io_uring_sqe
*sqe
)
4096 if (unlikely(req
->ctx
->flags
& IORING_SETUP_IOPOLL
))
4098 if (sqe
->flags
|| sqe
->ioprio
|| sqe
->off
|| sqe
->len
||
4102 req
->cancel
.addr
= READ_ONCE(sqe
->addr
);
4106 static int io_async_cancel(struct io_kiocb
*req
, struct io_kiocb
**nxt
)
4108 struct io_ring_ctx
*ctx
= req
->ctx
;
4110 io_async_find_and_cancel(ctx
, req
, req
->cancel
.addr
, nxt
, 0);
4114 static int io_files_update_prep(struct io_kiocb
*req
,
4115 const struct io_uring_sqe
*sqe
)
4117 if (sqe
->flags
|| sqe
->ioprio
|| sqe
->rw_flags
)
4120 req
->files_update
.offset
= READ_ONCE(sqe
->off
);
4121 req
->files_update
.nr_args
= READ_ONCE(sqe
->len
);
4122 if (!req
->files_update
.nr_args
)
4124 req
->files_update
.arg
= READ_ONCE(sqe
->addr
);
4128 static int io_files_update(struct io_kiocb
*req
, bool force_nonblock
)
4130 struct io_ring_ctx
*ctx
= req
->ctx
;
4131 struct io_uring_files_update up
;
4137 up
.offset
= req
->files_update
.offset
;
4138 up
.fds
= req
->files_update
.arg
;
4140 mutex_lock(&ctx
->uring_lock
);
4141 ret
= __io_sqe_files_update(ctx
, &up
, req
->files_update
.nr_args
);
4142 mutex_unlock(&ctx
->uring_lock
);
4145 req_set_fail_links(req
);
4146 io_cqring_add_event(req
, ret
);
4151 static int io_req_defer_prep(struct io_kiocb
*req
,
4152 const struct io_uring_sqe
*sqe
)
4159 if (io_op_defs
[req
->opcode
].file_table
) {
4160 ret
= io_grab_files(req
);
4165 io_req_work_grab_env(req
, &io_op_defs
[req
->opcode
]);
4167 switch (req
->opcode
) {
4170 case IORING_OP_READV
:
4171 case IORING_OP_READ_FIXED
:
4172 case IORING_OP_READ
:
4173 ret
= io_read_prep(req
, sqe
, true);
4175 case IORING_OP_WRITEV
:
4176 case IORING_OP_WRITE_FIXED
:
4177 case IORING_OP_WRITE
:
4178 ret
= io_write_prep(req
, sqe
, true);
4180 case IORING_OP_POLL_ADD
:
4181 ret
= io_poll_add_prep(req
, sqe
);
4183 case IORING_OP_POLL_REMOVE
:
4184 ret
= io_poll_remove_prep(req
, sqe
);
4186 case IORING_OP_FSYNC
:
4187 ret
= io_prep_fsync(req
, sqe
);
4189 case IORING_OP_SYNC_FILE_RANGE
:
4190 ret
= io_prep_sfr(req
, sqe
);
4192 case IORING_OP_SENDMSG
:
4193 case IORING_OP_SEND
:
4194 ret
= io_sendmsg_prep(req
, sqe
);
4196 case IORING_OP_RECVMSG
:
4197 case IORING_OP_RECV
:
4198 ret
= io_recvmsg_prep(req
, sqe
);
4200 case IORING_OP_CONNECT
:
4201 ret
= io_connect_prep(req
, sqe
);
4203 case IORING_OP_TIMEOUT
:
4204 ret
= io_timeout_prep(req
, sqe
, false);
4206 case IORING_OP_TIMEOUT_REMOVE
:
4207 ret
= io_timeout_remove_prep(req
, sqe
);
4209 case IORING_OP_ASYNC_CANCEL
:
4210 ret
= io_async_cancel_prep(req
, sqe
);
4212 case IORING_OP_LINK_TIMEOUT
:
4213 ret
= io_timeout_prep(req
, sqe
, true);
4215 case IORING_OP_ACCEPT
:
4216 ret
= io_accept_prep(req
, sqe
);
4218 case IORING_OP_FALLOCATE
:
4219 ret
= io_fallocate_prep(req
, sqe
);
4221 case IORING_OP_OPENAT
:
4222 ret
= io_openat_prep(req
, sqe
);
4224 case IORING_OP_CLOSE
:
4225 ret
= io_close_prep(req
, sqe
);
4227 case IORING_OP_FILES_UPDATE
:
4228 ret
= io_files_update_prep(req
, sqe
);
4230 case IORING_OP_STATX
:
4231 ret
= io_statx_prep(req
, sqe
);
4233 case IORING_OP_FADVISE
:
4234 ret
= io_fadvise_prep(req
, sqe
);
4236 case IORING_OP_MADVISE
:
4237 ret
= io_madvise_prep(req
, sqe
);
4239 case IORING_OP_OPENAT2
:
4240 ret
= io_openat2_prep(req
, sqe
);
4242 case IORING_OP_EPOLL_CTL
:
4243 ret
= io_epoll_ctl_prep(req
, sqe
);
4246 printk_once(KERN_WARNING
"io_uring: unhandled opcode %d\n",
4255 static int io_req_defer(struct io_kiocb
*req
, const struct io_uring_sqe
*sqe
)
4257 struct io_ring_ctx
*ctx
= req
->ctx
;
4260 /* Still need defer if there is pending req in defer list. */
4261 if (!req_need_defer(req
) && list_empty(&ctx
->defer_list
))
4264 if (!req
->io
&& io_alloc_async_ctx(req
))
4267 ret
= io_req_defer_prep(req
, sqe
);
4271 spin_lock_irq(&ctx
->completion_lock
);
4272 if (!req_need_defer(req
) && list_empty(&ctx
->defer_list
)) {
4273 spin_unlock_irq(&ctx
->completion_lock
);
4277 trace_io_uring_defer(ctx
, req
, req
->user_data
);
4278 list_add_tail(&req
->list
, &ctx
->defer_list
);
4279 spin_unlock_irq(&ctx
->completion_lock
);
4280 return -EIOCBQUEUED
;
4283 static void io_cleanup_req(struct io_kiocb
*req
)
4285 struct io_async_ctx
*io
= req
->io
;
4287 switch (req
->opcode
) {
4288 case IORING_OP_READV
:
4289 case IORING_OP_READ_FIXED
:
4290 case IORING_OP_READ
:
4291 case IORING_OP_WRITEV
:
4292 case IORING_OP_WRITE_FIXED
:
4293 case IORING_OP_WRITE
:
4294 if (io
->rw
.iov
!= io
->rw
.fast_iov
)
4297 case IORING_OP_SENDMSG
:
4298 case IORING_OP_RECVMSG
:
4299 if (io
->msg
.iov
!= io
->msg
.fast_iov
)
4302 case IORING_OP_OPENAT
:
4303 case IORING_OP_OPENAT2
:
4304 case IORING_OP_STATX
:
4305 putname(req
->open
.filename
);
4309 req
->flags
&= ~REQ_F_NEED_CLEANUP
;
4312 static int io_issue_sqe(struct io_kiocb
*req
, const struct io_uring_sqe
*sqe
,
4313 struct io_kiocb
**nxt
, bool force_nonblock
)
4315 struct io_ring_ctx
*ctx
= req
->ctx
;
4318 switch (req
->opcode
) {
4322 case IORING_OP_READV
:
4323 case IORING_OP_READ_FIXED
:
4324 case IORING_OP_READ
:
4326 ret
= io_read_prep(req
, sqe
, force_nonblock
);
4330 ret
= io_read(req
, nxt
, force_nonblock
);
4332 case IORING_OP_WRITEV
:
4333 case IORING_OP_WRITE_FIXED
:
4334 case IORING_OP_WRITE
:
4336 ret
= io_write_prep(req
, sqe
, force_nonblock
);
4340 ret
= io_write(req
, nxt
, force_nonblock
);
4342 case IORING_OP_FSYNC
:
4344 ret
= io_prep_fsync(req
, sqe
);
4348 ret
= io_fsync(req
, nxt
, force_nonblock
);
4350 case IORING_OP_POLL_ADD
:
4352 ret
= io_poll_add_prep(req
, sqe
);
4356 ret
= io_poll_add(req
, nxt
);
4358 case IORING_OP_POLL_REMOVE
:
4360 ret
= io_poll_remove_prep(req
, sqe
);
4364 ret
= io_poll_remove(req
);
4366 case IORING_OP_SYNC_FILE_RANGE
:
4368 ret
= io_prep_sfr(req
, sqe
);
4372 ret
= io_sync_file_range(req
, nxt
, force_nonblock
);
4374 case IORING_OP_SENDMSG
:
4375 case IORING_OP_SEND
:
4377 ret
= io_sendmsg_prep(req
, sqe
);
4381 if (req
->opcode
== IORING_OP_SENDMSG
)
4382 ret
= io_sendmsg(req
, nxt
, force_nonblock
);
4384 ret
= io_send(req
, nxt
, force_nonblock
);
4386 case IORING_OP_RECVMSG
:
4387 case IORING_OP_RECV
:
4389 ret
= io_recvmsg_prep(req
, sqe
);
4393 if (req
->opcode
== IORING_OP_RECVMSG
)
4394 ret
= io_recvmsg(req
, nxt
, force_nonblock
);
4396 ret
= io_recv(req
, nxt
, force_nonblock
);
4398 case IORING_OP_TIMEOUT
:
4400 ret
= io_timeout_prep(req
, sqe
, false);
4404 ret
= io_timeout(req
);
4406 case IORING_OP_TIMEOUT_REMOVE
:
4408 ret
= io_timeout_remove_prep(req
, sqe
);
4412 ret
= io_timeout_remove(req
);
4414 case IORING_OP_ACCEPT
:
4416 ret
= io_accept_prep(req
, sqe
);
4420 ret
= io_accept(req
, nxt
, force_nonblock
);
4422 case IORING_OP_CONNECT
:
4424 ret
= io_connect_prep(req
, sqe
);
4428 ret
= io_connect(req
, nxt
, force_nonblock
);
4430 case IORING_OP_ASYNC_CANCEL
:
4432 ret
= io_async_cancel_prep(req
, sqe
);
4436 ret
= io_async_cancel(req
, nxt
);
4438 case IORING_OP_FALLOCATE
:
4440 ret
= io_fallocate_prep(req
, sqe
);
4444 ret
= io_fallocate(req
, nxt
, force_nonblock
);
4446 case IORING_OP_OPENAT
:
4448 ret
= io_openat_prep(req
, sqe
);
4452 ret
= io_openat(req
, nxt
, force_nonblock
);
4454 case IORING_OP_CLOSE
:
4456 ret
= io_close_prep(req
, sqe
);
4460 ret
= io_close(req
, nxt
, force_nonblock
);
4462 case IORING_OP_FILES_UPDATE
:
4464 ret
= io_files_update_prep(req
, sqe
);
4468 ret
= io_files_update(req
, force_nonblock
);
4470 case IORING_OP_STATX
:
4472 ret
= io_statx_prep(req
, sqe
);
4476 ret
= io_statx(req
, nxt
, force_nonblock
);
4478 case IORING_OP_FADVISE
:
4480 ret
= io_fadvise_prep(req
, sqe
);
4484 ret
= io_fadvise(req
, nxt
, force_nonblock
);
4486 case IORING_OP_MADVISE
:
4488 ret
= io_madvise_prep(req
, sqe
);
4492 ret
= io_madvise(req
, nxt
, force_nonblock
);
4494 case IORING_OP_OPENAT2
:
4496 ret
= io_openat2_prep(req
, sqe
);
4500 ret
= io_openat2(req
, nxt
, force_nonblock
);
4502 case IORING_OP_EPOLL_CTL
:
4504 ret
= io_epoll_ctl_prep(req
, sqe
);
4508 ret
= io_epoll_ctl(req
, nxt
, force_nonblock
);
4518 if (ctx
->flags
& IORING_SETUP_IOPOLL
) {
4519 const bool in_async
= io_wq_current_is_worker();
4521 if (req
->result
== -EAGAIN
)
4524 /* workqueue context doesn't hold uring_lock, grab it now */
4526 mutex_lock(&ctx
->uring_lock
);
4528 io_iopoll_req_issued(req
);
4531 mutex_unlock(&ctx
->uring_lock
);
4537 static void io_wq_submit_work(struct io_wq_work
**workptr
)
4539 struct io_wq_work
*work
= *workptr
;
4540 struct io_kiocb
*req
= container_of(work
, struct io_kiocb
, work
);
4541 struct io_kiocb
*nxt
= NULL
;
4544 /* if NO_CANCEL is set, we must still run the work */
4545 if ((work
->flags
& (IO_WQ_WORK_CANCEL
|IO_WQ_WORK_NO_CANCEL
)) ==
4546 IO_WQ_WORK_CANCEL
) {
4551 req
->in_async
= true;
4553 ret
= io_issue_sqe(req
, NULL
, &nxt
, false);
4555 * We can get EAGAIN for polled IO even though we're
4556 * forcing a sync submission from here, since we can't
4557 * wait for request slots on the block side.
4565 /* drop submission reference */
4569 req_set_fail_links(req
);
4570 io_cqring_add_event(req
, ret
);
4574 /* if a dependent link is ready, pass it back */
4576 io_wq_assign_next(workptr
, nxt
);
4579 static int io_req_needs_file(struct io_kiocb
*req
, int fd
)
4581 if (!io_op_defs
[req
->opcode
].needs_file
)
4583 if ((fd
== -1 || fd
== AT_FDCWD
) && io_op_defs
[req
->opcode
].fd_non_neg
)
4588 static inline struct file
*io_file_from_index(struct io_ring_ctx
*ctx
,
4591 struct fixed_file_table
*table
;
4593 table
= &ctx
->file_data
->table
[index
>> IORING_FILE_TABLE_SHIFT
];
4594 return table
->files
[index
& IORING_FILE_TABLE_MASK
];;
4597 static int io_req_set_file(struct io_submit_state
*state
, struct io_kiocb
*req
,
4598 const struct io_uring_sqe
*sqe
)
4600 struct io_ring_ctx
*ctx
= req
->ctx
;
4604 flags
= READ_ONCE(sqe
->flags
);
4605 fd
= READ_ONCE(sqe
->fd
);
4607 if (!io_req_needs_file(req
, fd
))
4610 if (flags
& IOSQE_FIXED_FILE
) {
4611 if (unlikely(!ctx
->file_data
||
4612 (unsigned) fd
>= ctx
->nr_user_files
))
4614 fd
= array_index_nospec(fd
, ctx
->nr_user_files
);
4615 req
->file
= io_file_from_index(ctx
, fd
);
4618 req
->flags
|= REQ_F_FIXED_FILE
;
4619 percpu_ref_get(&ctx
->file_data
->refs
);
4621 if (req
->needs_fixed_file
)
4623 trace_io_uring_file_get(ctx
, fd
);
4624 req
->file
= io_file_get(state
, fd
);
4625 if (unlikely(!req
->file
))
4632 static int io_grab_files(struct io_kiocb
*req
)
4635 struct io_ring_ctx
*ctx
= req
->ctx
;
4637 if (req
->work
.files
|| (req
->flags
& REQ_F_NO_FILE_TABLE
))
4639 if (!ctx
->ring_file
)
4643 spin_lock_irq(&ctx
->inflight_lock
);
4645 * We use the f_ops->flush() handler to ensure that we can flush
4646 * out work accessing these files if the fd is closed. Check if
4647 * the fd has changed since we started down this path, and disallow
4648 * this operation if it has.
4650 if (fcheck(ctx
->ring_fd
) == ctx
->ring_file
) {
4651 list_add(&req
->inflight_entry
, &ctx
->inflight_list
);
4652 req
->flags
|= REQ_F_INFLIGHT
;
4653 req
->work
.files
= current
->files
;
4656 spin_unlock_irq(&ctx
->inflight_lock
);
4662 static enum hrtimer_restart
io_link_timeout_fn(struct hrtimer
*timer
)
4664 struct io_timeout_data
*data
= container_of(timer
,
4665 struct io_timeout_data
, timer
);
4666 struct io_kiocb
*req
= data
->req
;
4667 struct io_ring_ctx
*ctx
= req
->ctx
;
4668 struct io_kiocb
*prev
= NULL
;
4669 unsigned long flags
;
4671 spin_lock_irqsave(&ctx
->completion_lock
, flags
);
4674 * We don't expect the list to be empty, that will only happen if we
4675 * race with the completion of the linked work.
4677 if (!list_empty(&req
->link_list
)) {
4678 prev
= list_entry(req
->link_list
.prev
, struct io_kiocb
,
4680 if (refcount_inc_not_zero(&prev
->refs
)) {
4681 list_del_init(&req
->link_list
);
4682 prev
->flags
&= ~REQ_F_LINK_TIMEOUT
;
4687 spin_unlock_irqrestore(&ctx
->completion_lock
, flags
);
4690 req_set_fail_links(prev
);
4691 io_async_find_and_cancel(ctx
, req
, prev
->user_data
, NULL
,
4695 io_cqring_add_event(req
, -ETIME
);
4698 return HRTIMER_NORESTART
;
4701 static void io_queue_linked_timeout(struct io_kiocb
*req
)
4703 struct io_ring_ctx
*ctx
= req
->ctx
;
4706 * If the list is now empty, then our linked request finished before
4707 * we got a chance to setup the timer
4709 spin_lock_irq(&ctx
->completion_lock
);
4710 if (!list_empty(&req
->link_list
)) {
4711 struct io_timeout_data
*data
= &req
->io
->timeout
;
4713 data
->timer
.function
= io_link_timeout_fn
;
4714 hrtimer_start(&data
->timer
, timespec64_to_ktime(data
->ts
),
4717 spin_unlock_irq(&ctx
->completion_lock
);
4719 /* drop submission reference */
4723 static struct io_kiocb
*io_prep_linked_timeout(struct io_kiocb
*req
)
4725 struct io_kiocb
*nxt
;
4727 if (!(req
->flags
& REQ_F_LINK
))
4730 nxt
= list_first_entry_or_null(&req
->link_list
, struct io_kiocb
,
4732 if (!nxt
|| nxt
->opcode
!= IORING_OP_LINK_TIMEOUT
)
4735 req
->flags
|= REQ_F_LINK_TIMEOUT
;
4739 static void __io_queue_sqe(struct io_kiocb
*req
, const struct io_uring_sqe
*sqe
)
4741 struct io_kiocb
*linked_timeout
;
4742 struct io_kiocb
*nxt
= NULL
;
4743 const struct cred
*old_creds
= NULL
;
4747 linked_timeout
= io_prep_linked_timeout(req
);
4749 if (req
->work
.creds
&& req
->work
.creds
!= current_cred()) {
4751 revert_creds(old_creds
);
4752 if (old_creds
== req
->work
.creds
)
4753 old_creds
= NULL
; /* restored original creds */
4755 old_creds
= override_creds(req
->work
.creds
);
4758 ret
= io_issue_sqe(req
, sqe
, &nxt
, true);
4761 * We async punt it if the file wasn't marked NOWAIT, or if the file
4762 * doesn't support non-blocking read/write attempts
4764 if (ret
== -EAGAIN
&& (!(req
->flags
& REQ_F_NOWAIT
) ||
4765 (req
->flags
& REQ_F_MUST_PUNT
))) {
4767 if (io_op_defs
[req
->opcode
].file_table
) {
4768 ret
= io_grab_files(req
);
4774 * Queued up for async execution, worker will release
4775 * submit reference when the iocb is actually submitted.
4777 io_queue_async_work(req
);
4782 /* drop submission reference */
4783 io_put_req_find_next(req
, &nxt
);
4785 if (linked_timeout
) {
4787 io_queue_linked_timeout(linked_timeout
);
4789 io_put_req(linked_timeout
);
4792 /* and drop final reference, if we failed */
4794 io_cqring_add_event(req
, ret
);
4795 req_set_fail_links(req
);
4803 if (req
->flags
& REQ_F_FORCE_ASYNC
)
4808 revert_creds(old_creds
);
4811 static void io_queue_sqe(struct io_kiocb
*req
, const struct io_uring_sqe
*sqe
)
4815 ret
= io_req_defer(req
, sqe
);
4817 if (ret
!= -EIOCBQUEUED
) {
4819 io_cqring_add_event(req
, ret
);
4820 req_set_fail_links(req
);
4821 io_double_put_req(req
);
4823 } else if (req
->flags
& REQ_F_FORCE_ASYNC
) {
4824 ret
= io_req_defer_prep(req
, sqe
);
4825 if (unlikely(ret
< 0))
		 * Never try inline submit if IOSQE_ASYNC is set; go straight
		 * to async execution.
4831 req
->work
.flags
|= IO_WQ_WORK_CONCURRENT
;
4832 io_queue_async_work(req
);
4834 __io_queue_sqe(req
, sqe
);
4838 static inline void io_queue_link_head(struct io_kiocb
*req
)
4840 if (unlikely(req
->flags
& REQ_F_FAIL_LINK
)) {
4841 io_cqring_add_event(req
, -ECANCELED
);
4842 io_double_put_req(req
);
4844 io_queue_sqe(req
, NULL
);
4847 #define SQE_VALID_FLAGS (IOSQE_FIXED_FILE|IOSQE_IO_DRAIN|IOSQE_IO_LINK| \
4848 IOSQE_IO_HARDLINK | IOSQE_ASYNC)
4850 static bool io_submit_sqe(struct io_kiocb
*req
, const struct io_uring_sqe
*sqe
,
4851 struct io_submit_state
*state
, struct io_kiocb
**link
)
4853 struct io_ring_ctx
*ctx
= req
->ctx
;
4854 unsigned int sqe_flags
;
4857 sqe_flags
= READ_ONCE(sqe
->flags
);
4859 /* enforce forwards compatibility on users */
4860 if (unlikely(sqe_flags
& ~SQE_VALID_FLAGS
)) {
4865 id
= READ_ONCE(sqe
->personality
);
4867 req
->work
.creds
= idr_find(&ctx
->personality_idr
, id
);
4868 if (unlikely(!req
->work
.creds
)) {
4872 get_cred(req
->work
.creds
);
4875 /* same numerical values with corresponding REQ_F_*, safe to copy */
4876 req
->flags
|= sqe_flags
& (IOSQE_IO_DRAIN
|IOSQE_IO_HARDLINK
|
4879 ret
= io_req_set_file(state
, req
, sqe
);
4880 if (unlikely(ret
)) {
4882 io_cqring_add_event(req
, ret
);
4883 io_double_put_req(req
);
4888 * If we already have a head request, queue this one for async
4889 * submittal once the head completes. If we don't have a head but
4890 * IOSQE_IO_LINK is set in the sqe, start a new head. This one will be
4891 * submitted sync once the chain is complete. If none of those
4892 * conditions are true (normal request), then just queue it.
4895 struct io_kiocb
*head
= *link
;
		 * Since a link executes sequentially, draining both sides
		 * of the link also fulfils IOSQE_IO_DRAIN semantics for all
		 * requests in the link. So it drains the head and the
		 * request following the link; the latter is done via the
		 * drain_next flag to persist the effect across calls.
4904 if (sqe_flags
& IOSQE_IO_DRAIN
) {
4905 head
->flags
|= REQ_F_IO_DRAIN
;
4906 ctx
->drain_next
= 1;
4908 if (io_alloc_async_ctx(req
)) {
4913 ret
= io_req_defer_prep(req
, sqe
);
4915 /* fail even hard links since we don't submit */
4916 head
->flags
|= REQ_F_FAIL_LINK
;
4919 trace_io_uring_link(ctx
, req
, head
);
4920 list_add_tail(&req
->link_list
, &head
->link_list
);
4922 /* last request of a link, enqueue the link */
4923 if (!(sqe_flags
& (IOSQE_IO_LINK
|IOSQE_IO_HARDLINK
))) {
4924 io_queue_link_head(head
);
4928 if (unlikely(ctx
->drain_next
)) {
4929 req
->flags
|= REQ_F_IO_DRAIN
;
4930 req
->ctx
->drain_next
= 0;
4932 if (sqe_flags
& (IOSQE_IO_LINK
|IOSQE_IO_HARDLINK
)) {
4933 req
->flags
|= REQ_F_LINK
;
4934 INIT_LIST_HEAD(&req
->link_list
);
4936 if (io_alloc_async_ctx(req
)) {
4940 ret
= io_req_defer_prep(req
, sqe
);
4942 req
->flags
|= REQ_F_FAIL_LINK
;
4945 io_queue_sqe(req
, sqe
);
4953 * Batched submission is done, ensure local IO is flushed out.
4955 static void io_submit_state_end(struct io_submit_state
*state
)
4957 blk_finish_plug(&state
->plug
);
4959 if (state
->free_reqs
)
4960 kmem_cache_free_bulk(req_cachep
, state
->free_reqs
, state
->reqs
);
4964 * Start submission side cache.
4966 static void io_submit_state_start(struct io_submit_state
*state
,
4967 unsigned int max_ios
)
4969 blk_start_plug(&state
->plug
);
4970 state
->free_reqs
= 0;
4972 state
->ios_left
= max_ios
;
4975 static void io_commit_sqring(struct io_ring_ctx
*ctx
)
4977 struct io_rings
*rings
= ctx
->rings
;
4980 * Ensure any loads from the SQEs are done at this point,
4981 * since once we write the new head, the application could
4982 * write new data to them.
4984 smp_store_release(&rings
->sq
.head
, ctx
->cached_sq_head
);
4988 * Fetch an sqe, if one is available. Note that sqe_ptr will point to memory
4989 * that is mapped by userspace. This means that care needs to be taken to
4990 * ensure that reads are stable, as we cannot rely on userspace always
4991 * being a good citizen. If members of the sqe are validated and then later
4992 * used, it's important that those reads are done through READ_ONCE() to
4993 * prevent a re-load down the line.
4995 static bool io_get_sqring(struct io_ring_ctx
*ctx
, struct io_kiocb
*req
,
4996 const struct io_uring_sqe
**sqe_ptr
)
4998 u32
*sq_array
= ctx
->sq_array
;
5002 * The cached sq head (or cq tail) serves two purposes:
5004 * 1) allows us to batch the cost of updating the user visible
5006 * 2) allows the kernel side to track the head on its own, even
5007 * though the application is the one updating it.
5009 head
= READ_ONCE(sq_array
[ctx
->cached_sq_head
& ctx
->sq_mask
]);
5010 if (likely(head
< ctx
->sq_entries
)) {
		 * All IO needs to record the previous position; with LINK vs
		 * DRAIN it can be used to mark the position of the first IO
		 * in the link list.
5016 req
->sequence
= ctx
->cached_sq_head
;
5017 *sqe_ptr
= &ctx
->sq_sqes
[head
];
5018 req
->opcode
= READ_ONCE((*sqe_ptr
)->opcode
);
5019 req
->user_data
= READ_ONCE((*sqe_ptr
)->user_data
);
5020 ctx
->cached_sq_head
++;
5024 /* drop invalid entries */
5025 ctx
->cached_sq_head
++;
5026 ctx
->cached_sq_dropped
++;
5027 WRITE_ONCE(ctx
->rings
->sq_dropped
, ctx
->cached_sq_dropped
);
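/*
 * When io_get_sqring() finds an out-of-range index it still advances
 * cached_sq_head, so the bad slot is consumed rather than resubmitted
 * forever, and the running total is published to userspace through
 * rings->sq_dropped so the application can detect that entries were
 * silently skipped.
 */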
5031 static int io_submit_sqes(struct io_ring_ctx
*ctx
, unsigned int nr
,
5032 struct file
*ring_file
, int ring_fd
,
5033 struct mm_struct
**mm
, bool async
)
5035 struct io_submit_state state
, *statep
= NULL
;
5036 struct io_kiocb
*link
= NULL
;
5037 int i
, submitted
= 0;
5038 bool mm_fault
= false;
5040 /* if we have a backlog and couldn't flush it all, return BUSY */
5041 if (test_bit(0, &ctx
->sq_check_overflow
)) {
5042 if (!list_empty(&ctx
->cq_overflow_list
) &&
5043 !io_cqring_overflow_flush(ctx
, false))
5047 /* make sure SQ entry isn't read before tail */
5048 nr
= min3(nr
, ctx
->sq_entries
, io_sqring_entries(ctx
));
5050 if (!percpu_ref_tryget_many(&ctx
->refs
, nr
))
5053 if (nr
> IO_PLUG_THRESHOLD
) {
5054 io_submit_state_start(&state
, nr
);
5058 ctx
->ring_fd
= ring_fd
;
5059 ctx
->ring_file
= ring_file
;
5061 for (i
= 0; i
< nr
; i
++) {
5062 const struct io_uring_sqe
*sqe
;
5063 struct io_kiocb
*req
;
5066 req
= io_get_req(ctx
, statep
);
5067 if (unlikely(!req
)) {
5069 submitted
= -EAGAIN
;
5072 if (!io_get_sqring(ctx
, req
, &sqe
)) {
5073 __io_req_do_free(req
);
5077 /* will complete beyond this point, count as submitted */
5080 if (unlikely(req
->opcode
>= IORING_OP_LAST
)) {
5083 io_cqring_add_event(req
, err
);
5084 io_double_put_req(req
);
5088 if (io_op_defs
[req
->opcode
].needs_mm
&& !*mm
) {
5089 mm_fault
= mm_fault
|| !mmget_not_zero(ctx
->sqo_mm
);
5090 if (unlikely(mm_fault
)) {
5094 use_mm(ctx
->sqo_mm
);
5098 req
->in_async
= async
;
5099 req
->needs_fixed_file
= async
;
5100 trace_io_uring_submit_sqe(ctx
, req
->opcode
, req
->user_data
,
5102 if (!io_submit_sqe(req
, sqe
, statep
, &link
))
5106 if (unlikely(submitted
!= nr
)) {
5107 int ref_used
= (submitted
== -EAGAIN
) ? 0 : submitted
;
5109 percpu_ref_put_many(&ctx
->refs
, nr
- ref_used
);
5112 io_queue_link_head(link
);
5114 io_submit_state_end(&state
);
5116 /* Commit SQ ring head once we've consumed and submitted all SQEs */
5117 io_commit_sqring(ctx
);
static int io_sq_thread(void *data)
{
	struct io_ring_ctx *ctx = data;
	struct mm_struct *cur_mm = NULL;
	const struct cred *old_cred;
	mm_segment_t old_fs;
	DEFINE_WAIT(wait);
	unsigned long timeout;
	int ret = 0;

	complete(&ctx->completions[1]);

	old_fs = get_fs();
	set_fs(USER_DS);
	old_cred = override_creds(ctx->creds);

	timeout = jiffies + ctx->sq_thread_idle;
	while (!kthread_should_park()) {
		unsigned int to_submit;

		if (!list_empty(&ctx->poll_list)) {
			unsigned nr_events = 0;

			mutex_lock(&ctx->uring_lock);
			if (!list_empty(&ctx->poll_list))
				io_iopoll_getevents(ctx, &nr_events, 0);
			else
				timeout = jiffies + ctx->sq_thread_idle;
			mutex_unlock(&ctx->uring_lock);
		}

		to_submit = io_sqring_entries(ctx);

		/*
		 * If submit got -EBUSY, flag us as needing the application
		 * to enter the kernel to reap and flush events.
		 */
		if (!to_submit || ret == -EBUSY) {
			/*
			 * Drop cur_mm before scheduling, we can't hold it for
			 * long periods (or over schedule()). Do this before
			 * adding ourselves to the waitqueue, as the unuse/drop
			 * may sleep.
			 */
			if (cur_mm) {
				unuse_mm(cur_mm);
				mmput(cur_mm);
				cur_mm = NULL;
			}

			/*
			 * We're polling. If we're within the defined idle
			 * period, then let us spin without work before going
			 * to sleep. The exception is if we got EBUSY doing
			 * more IO, we should wait for the application to
			 * reap events and wake us up.
			 */
			if (!list_empty(&ctx->poll_list) ||
			    (!time_after(jiffies, timeout) && ret != -EBUSY &&
			    !percpu_ref_is_dying(&ctx->refs))) {
				cond_resched();
				continue;
			}

			prepare_to_wait(&ctx->sqo_wait, &wait,
						TASK_INTERRUPTIBLE);

			/*
			 * While doing polled IO, before going to sleep, we need
			 * to check if there are new reqs added to poll_list, it
			 * is because reqs may have been punted to io worker and
			 * will be added to poll_list later, hence check the
			 * poll_list again.
			 */
			if ((ctx->flags & IORING_SETUP_IOPOLL) &&
			    !list_empty_careful(&ctx->poll_list)) {
				finish_wait(&ctx->sqo_wait, &wait);
				continue;
			}

			/* Tell userspace we may need a wakeup call */
			ctx->rings->sq_flags |= IORING_SQ_NEED_WAKEUP;
			/* make sure to read SQ tail after writing flags */
			smp_mb();

			to_submit = io_sqring_entries(ctx);
			if (!to_submit || ret == -EBUSY) {
				if (kthread_should_park()) {
					finish_wait(&ctx->sqo_wait, &wait);
					break;
				}
				if (signal_pending(current))
					flush_signals(current);
				schedule();
				finish_wait(&ctx->sqo_wait, &wait);

				ctx->rings->sq_flags &= ~IORING_SQ_NEED_WAKEUP;
				ret = 0;
				continue;
			}
			finish_wait(&ctx->sqo_wait, &wait);

			ctx->rings->sq_flags &= ~IORING_SQ_NEED_WAKEUP;
		}

		mutex_lock(&ctx->uring_lock);
		ret = io_submit_sqes(ctx, to_submit, NULL, -1, &cur_mm, true);
		mutex_unlock(&ctx->uring_lock);
		timeout = jiffies + ctx->sq_thread_idle;
	}

	set_fs(old_fs);
	if (cur_mm) {
		unuse_mm(cur_mm);
		mmput(cur_mm);
	}
	revert_creds(old_cred);

	kthread_parkme();

	return 0;
}
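/*
 * Illustrative userspace sketch (not kernel code) of the wakeup protocol the
 * SQPOLL thread above relies on. "sq_tail" and "sq_flags" stand for
 * application-side pointers into the mapped SQ ring, not kernel symbols.
 * After publishing a new tail, the submitter only needs to enter the kernel
 * if the poll thread has gone idle and flagged IORING_SQ_NEED_WAKEUP (a full
 * barrier between the tail store and the flags load is required, see the
 * comment at the top of this file):
 *
 *	__atomic_store_n(sq_tail, new_tail, __ATOMIC_RELEASE);
 *	__atomic_thread_fence(__ATOMIC_SEQ_CST);
 *	if (*sq_flags & IORING_SQ_NEED_WAKEUP)
 *		syscall(__NR_io_uring_enter, ring_fd, to_submit, 0,
 *			IORING_ENTER_SQ_WAKEUP, NULL, 0);
 */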
struct io_wait_queue {
	struct wait_queue_entry wq;
	struct io_ring_ctx *ctx;
	unsigned to_wait;
	unsigned nr_timeouts;
};

static inline bool io_should_wake(struct io_wait_queue *iowq, bool noflush)
{
	struct io_ring_ctx *ctx = iowq->ctx;

	/*
	 * Wake up if we have enough events, or if a timeout occurred since we
	 * started waiting. For timeouts, we always want to return to userspace,
	 * regardless of event count.
	 */
	return io_cqring_events(ctx, noflush) >= iowq->to_wait ||
			atomic_read(&ctx->cq_timeouts) != iowq->nr_timeouts;
}

static int io_wake_function(struct wait_queue_entry *curr, unsigned int mode,
			    int wake_flags, void *key)
{
	struct io_wait_queue *iowq = container_of(curr, struct io_wait_queue,
							wq);

	/* use noflush == true, as we can't safely rely on locking context */
	if (!io_should_wake(iowq, true))
		return -1;

	return autoremove_wake_function(curr, mode, wake_flags, key);
}
/*
 * Wait until events become available, if we don't already have some. The
 * application must reap them itself, as they reside on the shared cq ring.
 */
static int io_cqring_wait(struct io_ring_ctx *ctx, int min_events,
			  const sigset_t __user *sig, size_t sigsz)
{
	struct io_wait_queue iowq = {
		.wq = {
			.private	= current,
			.func		= io_wake_function,
			.entry		= LIST_HEAD_INIT(iowq.wq.entry),
		},
		.ctx		= ctx,
		.to_wait	= min_events,
	};
	struct io_rings *rings = ctx->rings;
	int ret = 0;

	if (io_cqring_events(ctx, false) >= min_events)
		return 0;

	if (sig) {
#ifdef CONFIG_COMPAT
		if (in_compat_syscall())
			ret = set_compat_user_sigmask((const compat_sigset_t __user *)sig,
						      sigsz);
		else
#endif
			ret = set_user_sigmask(sig, sigsz);

		if (ret)
			return ret;
	}

	iowq.nr_timeouts = atomic_read(&ctx->cq_timeouts);
	trace_io_uring_cqring_wait(ctx, min_events);
	do {
		prepare_to_wait_exclusive(&ctx->wait, &iowq.wq,
						TASK_INTERRUPTIBLE);
		if (io_should_wake(&iowq, false))
			break;
		schedule();
		if (signal_pending(current)) {
			ret = -EINTR;
			break;
		}
	} while (1);
	finish_wait(&ctx->wait, &iowq.wq);

	restore_saved_sigmask_unless(ret == -EINTR);

	return READ_ONCE(rings->cq.head) == READ_ONCE(rings->cq.tail) ? ret : 0;
}
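/*
 * Illustrative userspace sketch (not kernel code) of the reap side that
 * io_cqring_wait() pairs with: completions are consumed straight from the
 * mapped CQ ring, the kernel only wakes the waiter. "cq_head", "cq_tail",
 * "cq_mask" and "cqes" stand for application-side pointers derived from
 * io_cqring_offsets, and consume() stands for application code; none of
 * these are kernel symbols.
 *
 *	unsigned head = *cq_head;
 *
 *	while (head != __atomic_load_n(cq_tail, __ATOMIC_ACQUIRE)) {
 *		struct io_uring_cqe *cqe = &cqes[head & *cq_mask];
 *
 *		consume(cqe->user_data, cqe->res);
 *		head++;
 *	}
 *	__atomic_store_n(cq_head, head, __ATOMIC_RELEASE);
 */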
static void __io_sqe_files_unregister(struct io_ring_ctx *ctx)
{
#if defined(CONFIG_UNIX)
	if (ctx->ring_sock) {
		struct sock *sock = ctx->ring_sock->sk;
		struct sk_buff *skb;

		while ((skb = skb_dequeue(&sock->sk_receive_queue)) != NULL)
			kfree_skb(skb);
	}
#else
	int i;

	for (i = 0; i < ctx->nr_user_files; i++) {
		struct file *file;

		file = io_file_from_index(ctx, i);
		if (file)
			fput(file);
	}
#endif
}

static void io_file_ref_kill(struct percpu_ref *ref)
{
	struct fixed_file_data *data;

	data = container_of(ref, struct fixed_file_data, refs);
	complete(&data->done);
}

static void io_file_ref_exit_and_free(struct work_struct *work)
{
	struct fixed_file_data *data;

	data = container_of(work, struct fixed_file_data, ref_work);

	/*
	 * Ensure any percpu-ref atomic switch callback has run, it could have
	 * been in progress when the files were being unregistered. Once
	 * that's done, we can safely exit and free the ref and containing
	 * data structure.
	 */
	rcu_barrier();
	percpu_ref_exit(&data->refs);
	kfree(data);
}
static int io_sqe_files_unregister(struct io_ring_ctx *ctx)
{
	struct fixed_file_data *data = ctx->file_data;
	unsigned nr_tables, i;

	if (!data)
		return -ENXIO;

	percpu_ref_kill_and_confirm(&data->refs, io_file_ref_kill);
	flush_work(&data->ref_work);
	wait_for_completion(&data->done);
	io_ring_file_ref_flush(data);

	__io_sqe_files_unregister(ctx);
	nr_tables = DIV_ROUND_UP(ctx->nr_user_files, IORING_MAX_FILES_TABLE);
	for (i = 0; i < nr_tables; i++)
		kfree(data->table[i].files);
	kfree(data->table);

	INIT_WORK(&data->ref_work, io_file_ref_exit_and_free);
	queue_work(system_wq, &data->ref_work);

	ctx->file_data = NULL;
	ctx->nr_user_files = 0;
	return 0;
}
static void io_sq_thread_stop(struct io_ring_ctx *ctx)
{
	if (ctx->sqo_thread) {
		wait_for_completion(&ctx->completions[1]);
		/*
		 * The park is a bit of a work-around, without it we get
		 * warning spews on shutdown with SQPOLL set and affinity
		 * set to a single CPU.
		 */
		kthread_park(ctx->sqo_thread);
		kthread_stop(ctx->sqo_thread);
		ctx->sqo_thread = NULL;
	}
}

static void io_finish_async(struct io_ring_ctx *ctx)
{
	io_sq_thread_stop(ctx);

	if (ctx->io_wq) {
		io_wq_destroy(ctx->io_wq);
		ctx->io_wq = NULL;
	}
}
#if defined(CONFIG_UNIX)
/*
 * Ensure the UNIX gc is aware of our file set, so we are certain that
 * the io_uring can be safely unregistered on process exit, even if we have
 * loops in the file referencing.
 */
static int __io_sqe_files_scm(struct io_ring_ctx *ctx, int nr, int offset)
{
	struct sock *sk = ctx->ring_sock->sk;
	struct scm_fp_list *fpl;
	struct sk_buff *skb;
	int i, nr_files;

	fpl = kzalloc(sizeof(*fpl), GFP_KERNEL);
	if (!fpl)
		return -ENOMEM;

	skb = alloc_skb(0, GFP_KERNEL);
	if (!skb) {
		kfree(fpl);
		return -ENOMEM;
	}

	skb->sk = sk;

	nr_files = 0;
	fpl->user = get_uid(ctx->user);
	for (i = 0; i < nr; i++) {
		struct file *file = io_file_from_index(ctx, i + offset);

		if (!file)
			continue;
		fpl->fp[nr_files] = get_file(file);
		unix_inflight(fpl->user, fpl->fp[nr_files]);
		nr_files++;
	}

	if (nr_files) {
		fpl->max = SCM_MAX_FD;
		fpl->count = nr_files;
		UNIXCB(skb).fp = fpl;
		skb->destructor = unix_destruct_scm;
		refcount_add(skb->truesize, &sk->sk_wmem_alloc);
		skb_queue_head(&sk->sk_receive_queue, skb);

		for (i = 0; i < nr_files; i++)
			fput(fpl->fp[i]);
	} else {
		kfree_skb(skb);
		kfree(fpl);
	}

	return 0;
}

/*
 * If UNIX sockets are enabled, fd passing can cause a reference cycle which
 * causes regular reference counting to break down. We rely on the UNIX
 * garbage collection to take care of this problem for us.
 */
static int io_sqe_files_scm(struct io_ring_ctx *ctx)
{
	unsigned left, total;
	int ret = 0;

	total = 0;
	left = ctx->nr_user_files;
	while (left) {
		unsigned this_files = min_t(unsigned, left, SCM_MAX_FD);

		ret = __io_sqe_files_scm(ctx, this_files, total);
		if (ret)
			break;
		left -= this_files;
		total += this_files;
	}

	if (!ret)
		return 0;

	while (total < ctx->nr_user_files) {
		struct file *file = io_file_from_index(ctx, total);

		if (file)
			fput(file);
		total++;
	}

	return ret;
}
#else
static int io_sqe_files_scm(struct io_ring_ctx *ctx)
{
	return 0;
}
#endif
static int io_sqe_alloc_file_tables(struct io_ring_ctx *ctx, unsigned nr_tables,
				    unsigned nr_files)
{
	int i;

	for (i = 0; i < nr_tables; i++) {
		struct fixed_file_table *table = &ctx->file_data->table[i];
		unsigned this_files;

		this_files = min(nr_files, IORING_MAX_FILES_TABLE);
		table->files = kcalloc(this_files, sizeof(struct file *),
					GFP_KERNEL);
		if (!table->files)
			break;
		nr_files -= this_files;
	}

	if (i == nr_tables)
		return 0;

	for (i = 0; i < nr_tables; i++) {
		struct fixed_file_table *table = &ctx->file_data->table[i];

		kfree(table->files);
	}
	return 1;
}
static void io_ring_file_put(struct io_ring_ctx *ctx, struct file *file)
{
#if defined(CONFIG_UNIX)
	struct sock *sock = ctx->ring_sock->sk;
	struct sk_buff_head list, *head = &sock->sk_receive_queue;
	struct sk_buff *skb;
	int i;

	__skb_queue_head_init(&list);

	/*
	 * Find the skb that holds this file in its SCM_RIGHTS. When found,
	 * remove this entry and rearrange the file array.
	 */
	skb = skb_dequeue(head);
	while (skb) {
		struct scm_fp_list *fp;

		fp = UNIXCB(skb).fp;
		for (i = 0; i < fp->count; i++) {
			int left;

			if (fp->fp[i] != file)
				continue;

			unix_notinflight(fp->user, fp->fp[i]);
			left = fp->count - 1 - i;
			if (left) {
				memmove(&fp->fp[i], &fp->fp[i + 1],
						left * sizeof(struct file *));
			}
			fp->count--;
			if (!fp->count) {
				kfree_skb(skb);
				skb = NULL;
			} else {
				__skb_queue_tail(&list, skb);
			}
			fput(file);
			file = NULL;
			break;
		}

		if (!file)
			break;

		__skb_queue_tail(&list, skb);

		skb = skb_dequeue(head);
	}

	if (skb_peek(&list)) {
		spin_lock_irq(&head->lock);
		while ((skb = __skb_dequeue(&list)) != NULL)
			__skb_queue_tail(head, skb);
		spin_unlock_irq(&head->lock);
	}
#else
	fput(file);
#endif
}
struct io_file_put {
	struct llist_node llist;
	struct file *file;
	bool free_pfile;
};

static void io_ring_file_ref_flush(struct fixed_file_data *data)
{
	struct io_file_put *pfile, *tmp;
	struct llist_node *node;

	while ((node = llist_del_all(&data->put_llist)) != NULL) {
		llist_for_each_entry_safe(pfile, tmp, node, llist) {
			io_ring_file_put(data->ctx, pfile->file);
			if (pfile->free_pfile)
				kfree(pfile);
		}
	}
}

static void io_ring_file_ref_switch(struct work_struct *work)
{
	struct fixed_file_data *data;

	data = container_of(work, struct fixed_file_data, ref_work);
	io_ring_file_ref_flush(data);
	percpu_ref_switch_to_percpu(&data->refs);
}

static void io_file_data_ref_zero(struct percpu_ref *ref)
{
	struct fixed_file_data *data;

	data = container_of(ref, struct fixed_file_data, refs);

	/*
	 * We can't safely switch from inside this context, punt to wq. If
	 * the table ref is going away, the table is being unregistered.
	 * Don't queue up the async work for that case, the caller will
	 * handle it.
	 */
	if (!percpu_ref_is_dying(&data->refs))
		queue_work(system_wq, &data->ref_work);
}
static int io_sqe_files_register(struct io_ring_ctx *ctx, void __user *arg,
				 unsigned nr_args)
{
	__s32 __user *fds = (__s32 __user *) arg;
	unsigned nr_tables;
	struct file *file;
	int fd, ret = 0;
	unsigned i;

	if (ctx->file_data)
		return -EBUSY;
	if (!nr_args)
		return -EINVAL;
	if (nr_args > IORING_MAX_FIXED_FILES)
		return -EMFILE;

	ctx->file_data = kzalloc(sizeof(*ctx->file_data), GFP_KERNEL);
	if (!ctx->file_data)
		return -ENOMEM;
	ctx->file_data->ctx = ctx;
	init_completion(&ctx->file_data->done);

	nr_tables = DIV_ROUND_UP(nr_args, IORING_MAX_FILES_TABLE);
	ctx->file_data->table = kcalloc(nr_tables,
					sizeof(struct fixed_file_table),
					GFP_KERNEL);
	if (!ctx->file_data->table) {
		kfree(ctx->file_data);
		ctx->file_data = NULL;
		return -ENOMEM;
	}

	if (percpu_ref_init(&ctx->file_data->refs, io_file_data_ref_zero,
				PERCPU_REF_ALLOW_REINIT, GFP_KERNEL)) {
		kfree(ctx->file_data->table);
		kfree(ctx->file_data);
		ctx->file_data = NULL;
		return -ENOMEM;
	}
	ctx->file_data->put_llist.first = NULL;
	INIT_WORK(&ctx->file_data->ref_work, io_ring_file_ref_switch);

	if (io_sqe_alloc_file_tables(ctx, nr_tables, nr_args)) {
		percpu_ref_exit(&ctx->file_data->refs);
		kfree(ctx->file_data->table);
		kfree(ctx->file_data);
		ctx->file_data = NULL;
		return -ENOMEM;
	}

	for (i = 0; i < nr_args; i++, ctx->nr_user_files++) {
		struct fixed_file_table *table;
		unsigned index;

		ret = -EFAULT;
		if (copy_from_user(&fd, &fds[i], sizeof(fd)))
			break;
		/* allow sparse sets */
		if (fd == -1) {
			ret = 0;
			continue;
		}

		table = &ctx->file_data->table[i >> IORING_FILE_TABLE_SHIFT];
		index = i & IORING_FILE_TABLE_MASK;
		file = fget(fd);

		ret = -EBADF;
		if (!file)
			break;

		/*
		 * Don't allow io_uring instances to be registered. If UNIX
		 * isn't enabled, then this causes a reference cycle and this
		 * instance can never get freed. If UNIX is enabled we'll
		 * handle it just fine, but there's still no point in allowing
		 * a ring fd as it doesn't support regular read/write anyway.
		 */
		if (file->f_op == &io_uring_fops) {
			fput(file);
			break;
		}
		ret = 0;
		table->files[index] = file;
	}

	if (ret) {
		for (i = 0; i < ctx->nr_user_files; i++) {
			file = io_file_from_index(ctx, i);
			if (file)
				fput(file);
		}
		for (i = 0; i < nr_tables; i++)
			kfree(ctx->file_data->table[i].files);

		kfree(ctx->file_data->table);
		kfree(ctx->file_data);
		ctx->file_data = NULL;
		ctx->nr_user_files = 0;
		return ret;
	}

	ret = io_sqe_files_scm(ctx);
	if (ret)
		io_sqe_files_unregister(ctx);

	return ret;
}
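/*
 * Illustrative userspace sketch for the registration path above (the fd
 * numbers are made up). A sparse table is allowed: slots holding -1 stay
 * empty and can be filled in later via IORING_REGISTER_FILES_UPDATE.
 *
 *	int fds[4] = { sock0, sock1, -1, -1 };
 *
 *	syscall(__NR_io_uring_register, ring_fd, IORING_REGISTER_FILES,
 *		fds, 4);
 *
 * SQEs can then reference a slot by index with IOSQE_FIXED_FILE instead of
 * carrying a regular file descriptor.
 */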
static int io_sqe_file_register(struct io_ring_ctx *ctx, struct file *file,
				int index)
{
#if defined(CONFIG_UNIX)
	struct sock *sock = ctx->ring_sock->sk;
	struct sk_buff_head *head = &sock->sk_receive_queue;
	struct sk_buff *skb;

	/*
	 * See if we can merge this file into an existing skb SCM_RIGHTS
	 * file set. If there's no room, fall back to allocating a new skb
	 * and filling it in.
	 */
	spin_lock_irq(&head->lock);
	skb = skb_peek(head);
	if (skb) {
		struct scm_fp_list *fpl = UNIXCB(skb).fp;

		if (fpl->count < SCM_MAX_FD) {
			__skb_unlink(skb, head);
			spin_unlock_irq(&head->lock);
			fpl->fp[fpl->count] = get_file(file);
			unix_inflight(fpl->user, fpl->fp[fpl->count]);
			fpl->count++;
			spin_lock_irq(&head->lock);
			__skb_queue_head(head, skb);
		} else {
			skb = NULL;
		}
	}
	spin_unlock_irq(&head->lock);

	if (skb) {
		fput(file);
		return 0;
	}

	return __io_sqe_files_scm(ctx, 1, index);
#else
	return 0;
#endif
}

static void io_atomic_switch(struct percpu_ref *ref)
{
	struct fixed_file_data *data;

	/*
	 * Juggle reference to ensure we hit zero, if needed, so we can
	 * switch back to percpu mode
	 */
	data = container_of(ref, struct fixed_file_data, refs);
	percpu_ref_put(&data->refs);
	percpu_ref_get(&data->refs);
}
static bool io_queue_file_removal(struct fixed_file_data *data,
				  struct file *file)
{
	struct io_file_put *pfile, pfile_stack;

	/*
	 * If we fail allocating the struct we need for doing async removal
	 * of this file, just punt to sync and wait for it.
	 */
	pfile = kzalloc(sizeof(*pfile), GFP_KERNEL);
	if (!pfile) {
		pfile = &pfile_stack;
		pfile->free_pfile = false;
	} else {
		pfile->free_pfile = true;
	}

	pfile->file = file;
	llist_add(&pfile->llist, &data->put_llist);

	if (pfile == &pfile_stack) {
		percpu_ref_switch_to_atomic(&data->refs, io_atomic_switch);
		flush_work(&data->ref_work);
		return false;
	}

	return true;
}
static int __io_sqe_files_update(struct io_ring_ctx *ctx,
				 struct io_uring_files_update *up,
				 unsigned nr_args)
{
	struct fixed_file_data *data = ctx->file_data;
	bool ref_switch = false;
	struct file *file;
	__s32 __user *fds;
	int fd, i, err;
	__u32 done;

	if (check_add_overflow(up->offset, nr_args, &done))
		return -EOVERFLOW;
	if (done > ctx->nr_user_files)
		return -EINVAL;

	done = 0;
	fds = u64_to_user_ptr(up->fds);
	while (nr_args) {
		struct fixed_file_table *table;
		unsigned index;

		err = 0;
		if (copy_from_user(&fd, &fds[done], sizeof(fd))) {
			err = -EFAULT;
			break;
		}
		i = array_index_nospec(up->offset, ctx->nr_user_files);
		table = &ctx->file_data->table[i >> IORING_FILE_TABLE_SHIFT];
		index = i & IORING_FILE_TABLE_MASK;
		if (table->files[index]) {
			file = io_file_from_index(ctx, index);
			table->files[index] = NULL;
			if (io_queue_file_removal(data, file))
				ref_switch = true;
		}
		if (fd != -1) {
			file = fget(fd);
			if (!file) {
				err = -EBADF;
				break;
			}
			/*
			 * Don't allow io_uring instances to be registered. If
			 * UNIX isn't enabled, then this causes a reference
			 * cycle and this instance can never get freed. If UNIX
			 * is enabled we'll handle it just fine, but there's
			 * still no point in allowing a ring fd as it doesn't
			 * support regular read/write anyway.
			 */
			if (file->f_op == &io_uring_fops) {
				fput(file);
				err = -EBADF;
				break;
			}
			table->files[index] = file;
			err = io_sqe_file_register(ctx, file, i);
			if (err)
				break;
		}
		nr_args--;
		done++;
		up->offset++;
	}

	if (ref_switch)
		percpu_ref_switch_to_atomic(&data->refs, io_atomic_switch);

	return done ? done : err;
}

static int io_sqe_files_update(struct io_ring_ctx *ctx, void __user *arg,
			       unsigned nr_args)
{
	struct io_uring_files_update up;

	if (!ctx->file_data)
		return -ENXIO;
	if (!nr_args)
		return -EINVAL;
	if (copy_from_user(&up, arg, sizeof(up)))
		return -EFAULT;
	if (up.resv)
		return -EINVAL;

	return __io_sqe_files_update(ctx, &up, nr_args);
}
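/*
 * Illustrative userspace sketch for __io_sqe_files_update(): replace slot 2
 * of a previously registered table with a new fd and clear slot 3 (-1
 * removes a file). Field names follow struct io_uring_files_update from the
 * uapi header; "new_fd" and "ring_fd" are placeholders.
 *
 *	__s32 fds[2] = { new_fd, -1 };
 *	struct io_uring_files_update up = {
 *		.offset = 2,
 *		.fds    = (__u64)(uintptr_t)fds,
 *	};
 *
 *	syscall(__NR_io_uring_register, ring_fd, IORING_REGISTER_FILES_UPDATE,
 *		&up, 2);
 */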
static void io_put_work(struct io_wq_work *work)
{
	struct io_kiocb *req = container_of(work, struct io_kiocb, work);

	io_put_req(req);
}

static void io_get_work(struct io_wq_work *work)
{
	struct io_kiocb *req = container_of(work, struct io_kiocb, work);

	refcount_inc(&req->refs);
}

static int io_init_wq_offload(struct io_ring_ctx *ctx,
			      struct io_uring_params *p)
{
	struct io_wq_data data;
	struct fd f;
	struct io_ring_ctx *ctx_attach;
	unsigned int concurrency;
	int ret = 0;

	data.user = ctx->user;
	data.get_work = io_get_work;
	data.put_work = io_put_work;

	if (!(p->flags & IORING_SETUP_ATTACH_WQ)) {
		/* Do QD, or 4 * CPUS, whatever is smallest */
		concurrency = min(ctx->sq_entries, 4 * num_online_cpus());

		ctx->io_wq = io_wq_create(concurrency, &data);
		if (IS_ERR(ctx->io_wq)) {
			ret = PTR_ERR(ctx->io_wq);
			ctx->io_wq = NULL;
		}
		return ret;
	}

	f = fdget(p->wq_fd);
	if (!f.file)
		return -EBADF;

	if (f.file->f_op != &io_uring_fops) {
		ret = -EINVAL;
		goto out_fput;
	}

	ctx_attach = f.file->private_data;
	/* @io_wq is protected by holding the fd */
	if (!io_wq_get(ctx_attach->io_wq, &data)) {
		ret = -EINVAL;
		goto out_fput;
	}

	ctx->io_wq = ctx_attach->io_wq;
out_fput:
	fdput(f);
	return ret;
}
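/*
 * Illustrative userspace sketch for the IORING_SETUP_ATTACH_WQ branch above:
 * a second ring can share the async backend of an existing one by passing
 * that ring's fd in wq_fd instead of getting its own io-wq. "existing_ring_fd"
 * is a placeholder for a ring set up earlier.
 *
 *	struct io_uring_params p = {
 *		.flags = IORING_SETUP_ATTACH_WQ,
 *		.wq_fd = existing_ring_fd,
 *	};
 *	int fd2 = syscall(__NR_io_uring_setup, 64, &p);
 */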
static int io_sq_offload_start(struct io_ring_ctx *ctx,
			       struct io_uring_params *p)
{
	int ret;

	init_waitqueue_head(&ctx->sqo_wait);
	mmgrab(current->mm);
	ctx->sqo_mm = current->mm;

	if (ctx->flags & IORING_SETUP_SQPOLL) {
		ret = -EPERM;
		if (!capable(CAP_SYS_ADMIN))
			goto err;

		ctx->sq_thread_idle = msecs_to_jiffies(p->sq_thread_idle);
		if (!ctx->sq_thread_idle)
			ctx->sq_thread_idle = HZ;

		if (p->flags & IORING_SETUP_SQ_AFF) {
			int cpu = p->sq_thread_cpu;

			ret = -EINVAL;
			if (cpu >= nr_cpu_ids)
				goto err;
			if (!cpu_online(cpu))
				goto err;

			ctx->sqo_thread = kthread_create_on_cpu(io_sq_thread,
							ctx, cpu,
							"io_uring-sq");
		} else {
			ctx->sqo_thread = kthread_create(io_sq_thread, ctx,
							"io_uring-sq");
		}
		if (IS_ERR(ctx->sqo_thread)) {
			ret = PTR_ERR(ctx->sqo_thread);
			ctx->sqo_thread = NULL;
			goto err;
		}
		wake_up_process(ctx->sqo_thread);
	} else if (p->flags & IORING_SETUP_SQ_AFF) {
		/* Can't have SQ_AFF without SQPOLL */
		ret = -EINVAL;
		goto err;
	}

	ret = io_init_wq_offload(ctx, p);
	if (ret)
		goto err;

	return 0;
err:
	io_finish_async(ctx);
	mmdrop(ctx->sqo_mm);
	ctx->sqo_mm = NULL;
	return ret;
}

static void io_unaccount_mem(struct user_struct *user, unsigned long nr_pages)
{
	atomic_long_sub(nr_pages, &user->locked_vm);
}
static int io_account_mem(struct user_struct *user, unsigned long nr_pages)
{
	unsigned long page_limit, cur_pages, new_pages;

	/* Don't allow more pages than we can safely lock */
	page_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;

	do {
		cur_pages = atomic_long_read(&user->locked_vm);
		new_pages = cur_pages + nr_pages;
		if (new_pages > page_limit)
			return -ENOMEM;
	} while (atomic_long_cmpxchg(&user->locked_vm, cur_pages,
					new_pages) != cur_pages);

	return 0;
}

static void io_mem_free(void *ptr)
{
	struct page *page;

	if (!ptr)
		return;

	page = virt_to_head_page(ptr);
	if (put_page_testzero(page))
		free_compound_page(page);
}

static void *io_mem_alloc(size_t size)
{
	gfp_t gfp_flags = GFP_KERNEL | __GFP_ZERO | __GFP_NOWARN | __GFP_COMP |
				__GFP_NORETRY;

	return (void *) __get_free_pages(gfp_flags, get_order(size));
}
static unsigned long rings_size(unsigned sq_entries, unsigned cq_entries,
				size_t *sq_offset)
{
	struct io_rings *rings;
	size_t off, sq_array_size;

	off = struct_size(rings, cqes, cq_entries);
	if (off == SIZE_MAX)
		return SIZE_MAX;

#ifdef CONFIG_SMP
	off = ALIGN(off, SMP_CACHE_BYTES);
	if (off == 0)
		return SIZE_MAX;
#endif

	if (sq_offset)
		*sq_offset = off;

	sq_array_size = array_size(sizeof(u32), sq_entries);
	if (sq_array_size == SIZE_MAX)
		return SIZE_MAX;

	if (check_add_overflow(off, sq_array_size, &off))
		return SIZE_MAX;

	return off;
}

static unsigned long ring_pages(unsigned sq_entries, unsigned cq_entries)
{
	size_t pages;

	pages = (size_t)1 << get_order(
		rings_size(sq_entries, cq_entries, NULL));
	pages += (size_t)1 << get_order(
		array_size(sizeof(struct io_uring_sqe), sq_entries));

	return pages;
}
static int io_sqe_buffer_unregister(struct io_ring_ctx *ctx)
{
	int i, j;

	if (!ctx->user_bufs)
		return -ENXIO;

	for (i = 0; i < ctx->nr_user_bufs; i++) {
		struct io_mapped_ubuf *imu = &ctx->user_bufs[i];

		for (j = 0; j < imu->nr_bvecs; j++)
			unpin_user_page(imu->bvec[j].bv_page);

		if (ctx->account_mem)
			io_unaccount_mem(ctx->user, imu->nr_bvecs);
		kvfree(imu->bvec);
		imu->nr_bvecs = 0;
	}

	kfree(ctx->user_bufs);
	ctx->user_bufs = NULL;
	ctx->nr_user_bufs = 0;
	return 0;
}

static int io_copy_iov(struct io_ring_ctx *ctx, struct iovec *dst,
		       void __user *arg, unsigned index)
{
	struct iovec __user *src;

#ifdef CONFIG_COMPAT
	if (ctx->compat) {
		struct compat_iovec __user *ciovs;
		struct compat_iovec ciov;

		ciovs = (struct compat_iovec __user *) arg;
		if (copy_from_user(&ciov, &ciovs[index], sizeof(ciov)))
			return -EFAULT;

		dst->iov_base = u64_to_user_ptr((u64)ciov.iov_base);
		dst->iov_len = ciov.iov_len;
		return 0;
	}
#endif
	src = (struct iovec __user *) arg;
	if (copy_from_user(dst, &src[index], sizeof(*dst)))
		return -EFAULT;
	return 0;
}
static int io_sqe_buffer_register(struct io_ring_ctx *ctx, void __user *arg,
				  unsigned nr_args)
{
	struct vm_area_struct **vmas = NULL;
	struct page **pages = NULL;
	int i, j, got_pages = 0;
	int ret = -EINVAL;

	if (ctx->user_bufs)
		return -EBUSY;
	if (!nr_args || nr_args > UIO_MAXIOV)
		return -EINVAL;

	ctx->user_bufs = kcalloc(nr_args, sizeof(struct io_mapped_ubuf),
					GFP_KERNEL);
	if (!ctx->user_bufs)
		return -ENOMEM;

	for (i = 0; i < nr_args; i++) {
		struct io_mapped_ubuf *imu = &ctx->user_bufs[i];
		unsigned long off, start, end, ubuf;
		int pret, nr_pages;
		struct iovec iov;
		size_t size;

		ret = io_copy_iov(ctx, &iov, arg, i);
		if (ret)
			goto err;

		/*
		 * Don't impose further limits on the size and buffer
		 * constraints here, we'll -EINVAL later when IO is
		 * submitted if they are wrong.
		 */
		ret = -EFAULT;
		if (!iov.iov_base || !iov.iov_len)
			goto err;

		/* arbitrary limit, but we need something */
		if (iov.iov_len > SZ_1G)
			goto err;

		ubuf = (unsigned long) iov.iov_base;
		end = (ubuf + iov.iov_len + PAGE_SIZE - 1) >> PAGE_SHIFT;
		start = ubuf >> PAGE_SHIFT;
		nr_pages = end - start;

		if (ctx->account_mem) {
			ret = io_account_mem(ctx->user, nr_pages);
			if (ret)
				goto err;
		}

		ret = 0;
		if (!pages || nr_pages > got_pages) {
			kvfree(vmas);
			kvfree(pages);
			pages = kvmalloc_array(nr_pages, sizeof(struct page *),
						GFP_KERNEL);
			vmas = kvmalloc_array(nr_pages,
					sizeof(struct vm_area_struct *),
					GFP_KERNEL);
			if (!pages || !vmas) {
				ret = -ENOMEM;
				if (ctx->account_mem)
					io_unaccount_mem(ctx->user, nr_pages);
				goto err;
			}
			got_pages = nr_pages;
		}

		imu->bvec = kvmalloc_array(nr_pages, sizeof(struct bio_vec),
						GFP_KERNEL);
		ret = -ENOMEM;
		if (!imu->bvec) {
			if (ctx->account_mem)
				io_unaccount_mem(ctx->user, nr_pages);
			goto err;
		}

		ret = 0;
		down_read(&current->mm->mmap_sem);
		pret = pin_user_pages(ubuf, nr_pages,
				      FOLL_WRITE | FOLL_LONGTERM,
				      pages, vmas);
		if (pret == nr_pages) {
			/* don't support file backed memory */
			for (j = 0; j < nr_pages; j++) {
				struct vm_area_struct *vma = vmas[j];

				if (vma->vm_file &&
				    !is_file_hugepages(vma->vm_file)) {
					ret = -EOPNOTSUPP;
					break;
				}
			}
		} else {
			ret = pret < 0 ? pret : -EFAULT;
		}
		up_read(&current->mm->mmap_sem);
		if (ret) {
			/*
			 * if we did partial map, or found file backed vmas,
			 * release any pages we did get
			 */
			if (pret > 0)
				unpin_user_pages(pages, pret);
			if (ctx->account_mem)
				io_unaccount_mem(ctx->user, nr_pages);
			kvfree(imu->bvec);
			goto err;
		}

		off = ubuf & ~PAGE_MASK;
		size = iov.iov_len;
		for (j = 0; j < nr_pages; j++) {
			size_t vec_len;

			vec_len = min_t(size_t, size, PAGE_SIZE - off);
			imu->bvec[j].bv_page = pages[j];
			imu->bvec[j].bv_len = vec_len;
			imu->bvec[j].bv_offset = off;
			off = 0;
			size -= vec_len;
		}
		/* store original address for later verification */
		imu->ubuf = ubuf;
		imu->len = iov.iov_len;
		imu->nr_bvecs = nr_pages;

		ctx->nr_user_bufs++;
	}
	kvfree(pages);
	kvfree(vmas);
	return 0;
err:
	kvfree(pages);
	kvfree(vmas);
	io_sqe_buffer_unregister(ctx);
	return ret;
}
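/*
 * Illustrative userspace sketch for the buffer registration above (the size
 * is arbitrary). Registered buffers are pinned once here and then referenced
 * by IORING_OP_READ_FIXED/IORING_OP_WRITE_FIXED via sqe->buf_index, avoiding
 * per-IO page pinning.
 *
 *	void *buf;
 *	posix_memalign(&buf, 4096, 1 << 20);
 *	struct iovec iov = { .iov_base = buf, .iov_len = 1 << 20 };
 *
 *	syscall(__NR_io_uring_register, ring_fd, IORING_REGISTER_BUFFERS,
 *		&iov, 1);
 */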
static int io_eventfd_register(struct io_ring_ctx *ctx, void __user *arg)
{
	__s32 __user *fds = arg;
	int fd;

	if (ctx->cq_ev_fd)
		return -EBUSY;

	if (copy_from_user(&fd, fds, sizeof(*fds)))
		return -EFAULT;

	ctx->cq_ev_fd = eventfd_ctx_fdget(fd);
	if (IS_ERR(ctx->cq_ev_fd)) {
		int ret = PTR_ERR(ctx->cq_ev_fd);
		ctx->cq_ev_fd = NULL;
		return ret;
	}

	return 0;
}

static int io_eventfd_unregister(struct io_ring_ctx *ctx)
{
	if (ctx->cq_ev_fd) {
		eventfd_ctx_put(ctx->cq_ev_fd);
		ctx->cq_ev_fd = NULL;
		return 0;
	}

	return -ENXIO;
}
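/*
 * Illustrative userspace sketch for the eventfd registration handled above:
 * once an eventfd is registered, completion events posted to the CQ ring
 * also signal it, so ring readiness can be multiplexed through epoll.
 *
 *	int efd = eventfd(0, EFD_CLOEXEC);
 *
 *	syscall(__NR_io_uring_register, ring_fd, IORING_REGISTER_EVENTFD,
 *		&efd, 1);
 */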
static void io_ring_ctx_free(struct io_ring_ctx *ctx)
{
	io_finish_async(ctx);
	if (ctx->sqo_mm)
		mmdrop(ctx->sqo_mm);

	io_iopoll_reap_events(ctx);
	io_sqe_buffer_unregister(ctx);
	io_sqe_files_unregister(ctx);
	io_eventfd_unregister(ctx);
	idr_destroy(&ctx->personality_idr);

#if defined(CONFIG_UNIX)
	if (ctx->ring_sock) {
		ctx->ring_sock->file = NULL; /* so that iput() is called */
		sock_release(ctx->ring_sock);
	}
#endif

	io_mem_free(ctx->rings);
	io_mem_free(ctx->sq_sqes);

	percpu_ref_exit(&ctx->refs);
	if (ctx->account_mem)
		io_unaccount_mem(ctx->user,
				ring_pages(ctx->sq_entries, ctx->cq_entries));
	free_uid(ctx->user);
	put_cred(ctx->creds);
	kfree(ctx->completions);
	kfree(ctx->cancel_hash);
	kmem_cache_free(req_cachep, ctx->fallback_req);
	kfree(ctx);
}
static __poll_t io_uring_poll(struct file *file, poll_table *wait)
{
	struct io_ring_ctx *ctx = file->private_data;
	__poll_t mask = 0;

	poll_wait(file, &ctx->cq_wait, wait);
	/*
	 * synchronizes with barrier from wq_has_sleeper call in
	 * io_commit_cqring
	 */
	smp_rmb();
	if (READ_ONCE(ctx->rings->sq.tail) - ctx->cached_sq_head !=
	    ctx->rings->sq_ring_entries)
		mask |= EPOLLOUT | EPOLLWRNORM;
	if (io_cqring_events(ctx, false))
		mask |= EPOLLIN | EPOLLRDNORM;

	return mask;
}

static int io_uring_fasync(int fd, struct file *file, int on)
{
	struct io_ring_ctx *ctx = file->private_data;

	return fasync_helper(fd, file, on, &ctx->cq_fasync);
}

static int io_remove_personalities(int id, void *p, void *data)
{
	struct io_ring_ctx *ctx = data;
	const struct cred *cred;

	cred = idr_remove(&ctx->personality_idr, id);
	if (cred)
		put_cred(cred);
	return 0;
}
static void io_ring_ctx_wait_and_kill(struct io_ring_ctx *ctx)
{
	mutex_lock(&ctx->uring_lock);
	percpu_ref_kill(&ctx->refs);
	mutex_unlock(&ctx->uring_lock);

	/*
	 * Wait for sq thread to idle, if we have one. It won't spin on new
	 * work after we've killed the ctx ref above. This is important to do
	 * before we cancel existing commands, as the thread could otherwise
	 * be queueing new work post that. If that's work we need to cancel,
	 * it could cause shutdown to hang.
	 */
	while (ctx->sqo_thread && !wq_has_sleeper(&ctx->sqo_wait))
		cpu_relax();

	io_kill_timeouts(ctx);
	io_poll_remove_all(ctx);

	if (ctx->io_wq)
		io_wq_cancel_all(ctx->io_wq);

	io_iopoll_reap_events(ctx);
	/* if we failed setting up the ctx, we might not have any rings */
	if (ctx->rings)
		io_cqring_overflow_flush(ctx, true);
	idr_for_each(&ctx->personality_idr, io_remove_personalities, ctx);
	wait_for_completion(&ctx->completions[0]);
	io_ring_ctx_free(ctx);
}

static int io_uring_release(struct inode *inode, struct file *file)
{
	struct io_ring_ctx *ctx = file->private_data;

	file->private_data = NULL;
	io_ring_ctx_wait_and_kill(ctx);
	return 0;
}
static void io_uring_cancel_files(struct io_ring_ctx *ctx,
				  struct files_struct *files)
{
	struct io_kiocb *req;
	DEFINE_WAIT(wait);

	while (!list_empty_careful(&ctx->inflight_list)) {
		struct io_kiocb *cancel_req = NULL;

		spin_lock_irq(&ctx->inflight_lock);
		list_for_each_entry(req, &ctx->inflight_list, inflight_entry) {
			if (req->work.files != files)
				continue;
			/* req is being completed, ignore */
			if (!refcount_inc_not_zero(&req->refs))
				continue;
			cancel_req = req;
			break;
		}
		if (cancel_req)
			prepare_to_wait(&ctx->inflight_wait, &wait,
						TASK_UNINTERRUPTIBLE);
		spin_unlock_irq(&ctx->inflight_lock);

		/* We need to keep going until we don't find a matching req */
		if (!cancel_req)
			break;

		if (cancel_req->flags & REQ_F_OVERFLOW) {
			spin_lock_irq(&ctx->completion_lock);
			list_del(&cancel_req->list);
			cancel_req->flags &= ~REQ_F_OVERFLOW;
			if (list_empty(&ctx->cq_overflow_list)) {
				clear_bit(0, &ctx->sq_check_overflow);
				clear_bit(0, &ctx->cq_check_overflow);
			}
			spin_unlock_irq(&ctx->completion_lock);

			WRITE_ONCE(ctx->rings->cq_overflow,
				atomic_inc_return(&ctx->cached_cq_overflow));

			/*
			 * Put inflight ref and overflow ref. If that's
			 * all we had, then we're done with this request.
			 */
			if (refcount_sub_and_test(2, &cancel_req->refs)) {
				io_put_req(cancel_req);
				continue;
			}
		}

		io_wq_cancel_work(ctx->io_wq, &cancel_req->work);
		io_put_req(cancel_req);
		schedule();
	}
	finish_wait(&ctx->inflight_wait, &wait);
}

static int io_uring_flush(struct file *file, void *data)
{
	struct io_ring_ctx *ctx = file->private_data;

	io_uring_cancel_files(ctx, data);

	/*
	 * If the task is going away, cancel work it may have pending
	 */
	if (fatal_signal_pending(current) || (current->flags & PF_EXITING))
		io_wq_cancel_pid(ctx->io_wq, task_pid_vnr(current));

	return 0;
}
static void *io_uring_validate_mmap_request(struct file *file,
					    loff_t pgoff, size_t sz)
{
	struct io_ring_ctx *ctx = file->private_data;
	loff_t offset = pgoff << PAGE_SHIFT;
	struct page *page;
	void *ptr;

	switch (offset) {
	case IORING_OFF_SQ_RING:
	case IORING_OFF_CQ_RING:
		ptr = ctx->rings;
		break;
	case IORING_OFF_SQES:
		ptr = ctx->sq_sqes;
		break;
	default:
		return ERR_PTR(-EINVAL);
	}

	page = virt_to_head_page(ptr);
	if (sz > page_size(page))
		return ERR_PTR(-EINVAL);

	return ptr;
}

#ifdef CONFIG_MMU

static int io_uring_mmap(struct file *file, struct vm_area_struct *vma)
{
	size_t sz = vma->vm_end - vma->vm_start;
	unsigned long pfn;
	void *ptr;

	ptr = io_uring_validate_mmap_request(file, vma->vm_pgoff, sz);
	if (IS_ERR(ptr))
		return PTR_ERR(ptr);

	pfn = virt_to_phys(ptr) >> PAGE_SHIFT;
	return remap_pfn_range(vma, vma->vm_start, pfn, sz, vma->vm_page_prot);
}
#else /* !CONFIG_MMU */

static int io_uring_mmap(struct file *file, struct vm_area_struct *vma)
{
	return vma->vm_flags & (VM_SHARED | VM_MAYSHARE) ? 0 : -EINVAL;
}

static unsigned int io_uring_nommu_mmap_capabilities(struct file *file)
{
	return NOMMU_MAP_DIRECT | NOMMU_MAP_READ | NOMMU_MAP_WRITE;
}

static unsigned long io_uring_nommu_get_unmapped_area(struct file *file,
	unsigned long addr, unsigned long len,
	unsigned long pgoff, unsigned long flags)
{
	void *ptr;

	ptr = io_uring_validate_mmap_request(file, pgoff, len);
	if (IS_ERR(ptr))
		return PTR_ERR(ptr);

	return (unsigned long) ptr;
}

#endif /* !CONFIG_MMU */
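/*
 * Illustrative userspace sketch for io_uring_validate_mmap_request(): the
 * mmap offsets select the SQ/CQ ring headers or the SQE array ("p" is the
 * io_uring_params filled in by io_uring_setup()).
 *
 *	void *sq_ring = mmap(NULL, p.sq_off.array + p.sq_entries * sizeof(__u32),
 *			     PROT_READ | PROT_WRITE, MAP_SHARED | MAP_POPULATE,
 *			     ring_fd, IORING_OFF_SQ_RING);
 *	void *sqes = mmap(NULL, p.sq_entries * sizeof(struct io_uring_sqe),
 *			  PROT_READ | PROT_WRITE, MAP_SHARED | MAP_POPULATE,
 *			  ring_fd, IORING_OFF_SQES);
 *
 * With IORING_FEAT_SINGLE_MMAP the CQ ring shares the first mapping;
 * otherwise it is mapped separately at IORING_OFF_CQ_RING.
 */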
SYSCALL_DEFINE6(io_uring_enter, unsigned int, fd, u32, to_submit,
		u32, min_complete, u32, flags, const sigset_t __user *, sig,
		size_t, sigsz)
{
	struct io_ring_ctx *ctx;
	long ret = -EBADF;
	int submitted = 0;
	struct fd f;

	if (flags & ~(IORING_ENTER_GETEVENTS | IORING_ENTER_SQ_WAKEUP))
		return -EINVAL;

	f = fdget(fd);
	if (!f.file)
		return -EBADF;

	ret = -EOPNOTSUPP;
	if (f.file->f_op != &io_uring_fops)
		goto out_fput;

	ret = -ENXIO;
	ctx = f.file->private_data;
	if (!percpu_ref_tryget(&ctx->refs))
		goto out_fput;

	/*
	 * For SQ polling, the thread will do all submissions and completions.
	 * Just return the requested submit count, and wake the thread if
	 * we were asked to.
	 */
	ret = 0;
	if (ctx->flags & IORING_SETUP_SQPOLL) {
		if (!list_empty_careful(&ctx->cq_overflow_list))
			io_cqring_overflow_flush(ctx, false);
		if (flags & IORING_ENTER_SQ_WAKEUP)
			wake_up(&ctx->sqo_wait);
		submitted = to_submit;
	} else if (to_submit) {
		struct mm_struct *cur_mm;

		mutex_lock(&ctx->uring_lock);
		/* already have mm, so io_submit_sqes() won't try to grab it */
		cur_mm = ctx->sqo_mm;
		submitted = io_submit_sqes(ctx, to_submit, f.file, fd,
					   &cur_mm, false);
		mutex_unlock(&ctx->uring_lock);

		if (submitted != to_submit)
			goto out;
	}
	if (flags & IORING_ENTER_GETEVENTS) {
		unsigned nr_events = 0;

		min_complete = min(min_complete, ctx->cq_entries);

		if (ctx->flags & IORING_SETUP_IOPOLL) {
			ret = io_iopoll_check(ctx, &nr_events, min_complete);
		} else {
			ret = io_cqring_wait(ctx, min_complete, sig, sigsz);
		}
	}

out:
	percpu_ref_put(&ctx->refs);
out_fput:
	fdput(f);
	return submitted ? submitted : ret;
}
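/*
 * Illustrative userspace sketch of a combined submit-and-wait call into the
 * syscall above: submit whatever has been queued in the SQ ring and block
 * until at least one completion is available.
 *
 *	int ret = syscall(__NR_io_uring_enter, ring_fd, queued, 1,
 *			  IORING_ENTER_GETEVENTS, NULL, 0);
 *
 * On success the return value is the number of SQEs consumed; a short count
 * can mean submission stopped early, e.g. when an overflowed CQ ring made
 * submission return -EBUSY, and the remainder should be resubmitted.
 */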
#ifdef CONFIG_PROC_FS
static int io_uring_show_cred(int id, void *p, void *data)
{
	const struct cred *cred = p;
	struct seq_file *m = data;
	struct user_namespace *uns = seq_user_ns(m);
	struct group_info *gi;
	kernel_cap_t cap;
	unsigned __capi;
	int g;

	seq_printf(m, "%5d\n", id);
	seq_put_decimal_ull(m, "\tUid:\t", from_kuid_munged(uns, cred->uid));
	seq_put_decimal_ull(m, "\t\t", from_kuid_munged(uns, cred->euid));
	seq_put_decimal_ull(m, "\t\t", from_kuid_munged(uns, cred->suid));
	seq_put_decimal_ull(m, "\t\t", from_kuid_munged(uns, cred->fsuid));
	seq_put_decimal_ull(m, "\n\tGid:\t", from_kgid_munged(uns, cred->gid));
	seq_put_decimal_ull(m, "\t\t", from_kgid_munged(uns, cred->egid));
	seq_put_decimal_ull(m, "\t\t", from_kgid_munged(uns, cred->sgid));
	seq_put_decimal_ull(m, "\t\t", from_kgid_munged(uns, cred->fsgid));
	seq_puts(m, "\n\tGroups:\t");
	gi = cred->group_info;
	for (g = 0; g < gi->ngroups; g++) {
		seq_put_decimal_ull(m, g ? " " : "",
					from_kgid_munged(uns, gi->gid[g]));
	}
	seq_puts(m, "\n\tCapEff:\t");
	cap = cred->cap_effective;
	CAP_FOR_EACH_U32(__capi)
		seq_put_hex_ll(m, NULL, cap.cap[CAP_LAST_U32 - __capi], 8);
	seq_putc(m, '\n');
	return 0;
}
static void __io_uring_show_fdinfo(struct io_ring_ctx *ctx, struct seq_file *m)
{
	int i;

	mutex_lock(&ctx->uring_lock);
	seq_printf(m, "UserFiles:\t%u\n", ctx->nr_user_files);
	for (i = 0; i < ctx->nr_user_files; i++) {
		struct fixed_file_table *table;
		struct file *f;

		table = &ctx->file_data->table[i >> IORING_FILE_TABLE_SHIFT];
		f = table->files[i & IORING_FILE_TABLE_MASK];
		if (f)
			seq_printf(m, "%5u: %s\n", i, file_dentry(f)->d_iname);
		else
			seq_printf(m, "%5u: <none>\n", i);
	}
	seq_printf(m, "UserBufs:\t%u\n", ctx->nr_user_bufs);
	for (i = 0; i < ctx->nr_user_bufs; i++) {
		struct io_mapped_ubuf *buf = &ctx->user_bufs[i];

		seq_printf(m, "%5u: 0x%llx/%u\n", i, buf->ubuf,
						(unsigned int) buf->len);
	}
	if (!idr_is_empty(&ctx->personality_idr)) {
		seq_printf(m, "Personalities:\n");
		idr_for_each(&ctx->personality_idr, io_uring_show_cred, m);
	}
	mutex_unlock(&ctx->uring_lock);
}

static void io_uring_show_fdinfo(struct seq_file *m, struct file *f)
{
	struct io_ring_ctx *ctx = f->private_data;

	if (percpu_ref_tryget(&ctx->refs)) {
		__io_uring_show_fdinfo(ctx, m);
		percpu_ref_put(&ctx->refs);
	}
}
#endif
static const struct file_operations io_uring_fops = {
	.release	= io_uring_release,
	.flush		= io_uring_flush,
	.mmap		= io_uring_mmap,
#ifndef CONFIG_MMU
	.get_unmapped_area = io_uring_nommu_get_unmapped_area,
	.mmap_capabilities = io_uring_nommu_mmap_capabilities,
#endif
	.poll		= io_uring_poll,
	.fasync		= io_uring_fasync,
#ifdef CONFIG_PROC_FS
	.show_fdinfo	= io_uring_show_fdinfo,
#endif
};
*ctx
,
6784 struct io_uring_params
*p
)
6786 struct io_rings
*rings
;
6787 size_t size
, sq_array_offset
;
6789 size
= rings_size(p
->sq_entries
, p
->cq_entries
, &sq_array_offset
);
6790 if (size
== SIZE_MAX
)
6793 rings
= io_mem_alloc(size
);
6798 ctx
->sq_array
= (u32
*)((char *)rings
+ sq_array_offset
);
6799 rings
->sq_ring_mask
= p
->sq_entries
- 1;
6800 rings
->cq_ring_mask
= p
->cq_entries
- 1;
6801 rings
->sq_ring_entries
= p
->sq_entries
;
6802 rings
->cq_ring_entries
= p
->cq_entries
;
6803 ctx
->sq_mask
= rings
->sq_ring_mask
;
6804 ctx
->cq_mask
= rings
->cq_ring_mask
;
6805 ctx
->sq_entries
= rings
->sq_ring_entries
;
6806 ctx
->cq_entries
= rings
->cq_ring_entries
;
6808 size
= array_size(sizeof(struct io_uring_sqe
), p
->sq_entries
);
6809 if (size
== SIZE_MAX
) {
6810 io_mem_free(ctx
->rings
);
6815 ctx
->sq_sqes
= io_mem_alloc(size
);
6816 if (!ctx
->sq_sqes
) {
6817 io_mem_free(ctx
->rings
);
/*
 * Allocate an anonymous fd, this is what constitutes the application
 * visible backing of an io_uring instance. The application mmaps this
 * fd to gain access to the SQ/CQ ring details. If UNIX sockets are enabled,
 * we have to tie this fd to a socket for file garbage collection purposes.
 */
static int io_uring_get_fd(struct io_ring_ctx *ctx)
{
	struct file *file;
	int ret;

#if defined(CONFIG_UNIX)
	ret = sock_create_kern(&init_net, PF_UNIX, SOCK_RAW, IPPROTO_IP,
				&ctx->ring_sock);
	if (ret)
		return ret;
#endif

	ret = get_unused_fd_flags(O_RDWR | O_CLOEXEC);
	if (ret < 0)
		goto err;

	file = anon_inode_getfile("[io_uring]", &io_uring_fops, ctx,
					O_RDWR | O_CLOEXEC);
	if (IS_ERR(file)) {
		put_unused_fd(ret);
		ret = PTR_ERR(file);
		goto err;
	}

#if defined(CONFIG_UNIX)
	ctx->ring_sock->file = file;
#endif
	fd_install(ret, file);
	return ret;
err:
#if defined(CONFIG_UNIX)
	sock_release(ctx->ring_sock);
	ctx->ring_sock = NULL;
#endif
	return ret;
}
static int io_uring_create(unsigned entries, struct io_uring_params *p)
{
	struct user_struct *user = NULL;
	struct io_ring_ctx *ctx;
	bool account_mem;
	int ret;

	if (!entries)
		return -EINVAL;
	if (entries > IORING_MAX_ENTRIES) {
		if (!(p->flags & IORING_SETUP_CLAMP))
			return -EINVAL;
		entries = IORING_MAX_ENTRIES;
	}

	/*
	 * Use twice as many entries for the CQ ring. It's possible for the
	 * application to drive a higher depth than the size of the SQ ring,
	 * since the sqes are only used at submission time. This allows for
	 * some flexibility in overcommitting a bit. If the application has
	 * set IORING_SETUP_CQSIZE, it will have passed in the desired number
	 * of CQ ring entries manually.
	 */
	p->sq_entries = roundup_pow_of_two(entries);
	if (p->flags & IORING_SETUP_CQSIZE) {
		/*
		 * If IORING_SETUP_CQSIZE is set, we do the same roundup
		 * to a power-of-two, if it isn't already. We do NOT impose
		 * any cq vs sq ring sizing.
		 */
		if (p->cq_entries < p->sq_entries)
			return -EINVAL;
		if (p->cq_entries > IORING_MAX_CQ_ENTRIES) {
			if (!(p->flags & IORING_SETUP_CLAMP))
				return -EINVAL;
			p->cq_entries = IORING_MAX_CQ_ENTRIES;
		}
		p->cq_entries = roundup_pow_of_two(p->cq_entries);
	} else {
		p->cq_entries = 2 * p->sq_entries;
	}

	user = get_uid(current_user());
	account_mem = !capable(CAP_IPC_LOCK);

	if (account_mem) {
		ret = io_account_mem(user,
				ring_pages(p->sq_entries, p->cq_entries));
		if (ret) {
			free_uid(user);
			return ret;
		}
	}

	ctx = io_ring_ctx_alloc(p);
	if (!ctx) {
		if (account_mem)
			io_unaccount_mem(user, ring_pages(p->sq_entries,
								p->cq_entries));
		free_uid(user);
		return -ENOMEM;
	}
	ctx->compat = in_compat_syscall();
	ctx->account_mem = account_mem;
	ctx->user = user;
	ctx->creds = get_current_cred();

	ret = io_allocate_scq_urings(ctx, p);
	if (ret)
		goto err;

	ret = io_sq_offload_start(ctx, p);
	if (ret)
		goto err;

	memset(&p->sq_off, 0, sizeof(p->sq_off));
	p->sq_off.head = offsetof(struct io_rings, sq.head);
	p->sq_off.tail = offsetof(struct io_rings, sq.tail);
	p->sq_off.ring_mask = offsetof(struct io_rings, sq_ring_mask);
	p->sq_off.ring_entries = offsetof(struct io_rings, sq_ring_entries);
	p->sq_off.flags = offsetof(struct io_rings, sq_flags);
	p->sq_off.dropped = offsetof(struct io_rings, sq_dropped);
	p->sq_off.array = (char *)ctx->sq_array - (char *)ctx->rings;

	memset(&p->cq_off, 0, sizeof(p->cq_off));
	p->cq_off.head = offsetof(struct io_rings, cq.head);
	p->cq_off.tail = offsetof(struct io_rings, cq.tail);
	p->cq_off.ring_mask = offsetof(struct io_rings, cq_ring_mask);
	p->cq_off.ring_entries = offsetof(struct io_rings, cq_ring_entries);
	p->cq_off.overflow = offsetof(struct io_rings, cq_overflow);
	p->cq_off.cqes = offsetof(struct io_rings, cqes);

	/*
	 * Install ring fd as the very last thing, so we don't risk someone
	 * having closed it before we finish setup
	 */
	ret = io_uring_get_fd(ctx);
	if (ret < 0)
		goto err;

	p->features = IORING_FEAT_SINGLE_MMAP | IORING_FEAT_NODROP |
			IORING_FEAT_SUBMIT_STABLE | IORING_FEAT_RW_CUR_POS |
			IORING_FEAT_CUR_PERSONALITY;
	trace_io_uring_create(ret, ctx, p->sq_entries, p->cq_entries, p->flags);
	return ret;
err:
	io_ring_ctx_wait_and_kill(ctx);
	return ret;
}
/*
 * Sets up an aio uring context, and returns the fd. Applications ask for a
 * ring size, we return the actual sq/cq ring sizes (among other things) in the
 * params structure passed in.
 */
static long io_uring_setup(u32 entries, struct io_uring_params __user *params)
{
	struct io_uring_params p;
	long ret;
	int i;

	if (copy_from_user(&p, params, sizeof(p)))
		return -EFAULT;
	for (i = 0; i < ARRAY_SIZE(p.resv); i++) {
		if (p.resv[i])
			return -EINVAL;
	}

	if (p.flags & ~(IORING_SETUP_IOPOLL | IORING_SETUP_SQPOLL |
			IORING_SETUP_SQ_AFF | IORING_SETUP_CQSIZE |
			IORING_SETUP_CLAMP | IORING_SETUP_ATTACH_WQ))
		return -EINVAL;

	ret = io_uring_create(entries, &p);
	if (ret < 0)
		return ret;

	if (copy_to_user(params, &p, sizeof(p)))
		return -EFAULT;

	return ret;
}

SYSCALL_DEFINE2(io_uring_setup, u32, entries,
		struct io_uring_params __user *, params)
{
	return io_uring_setup(entries, params);
}
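/*
 * Illustrative userspace sketch for io_uring_setup(): ask for a queue depth
 * and read the rounded ring sizes and mmap offsets back from the same params
 * structure. IORING_SETUP_CQSIZE overrides the default "CQ = 2 * SQ" sizing
 * computed in io_uring_create().
 *
 *	struct io_uring_params p = {
 *		.flags      = IORING_SETUP_CQSIZE,
 *		.cq_entries = 4096,
 *	};
 *	int ring_fd = syscall(__NR_io_uring_setup, 256, &p);
 *
 * On success p.sq_entries is 256, p.cq_entries is 4096, and p.sq_off/p.cq_off
 * describe the ring layout for the subsequent mmap() calls.
 */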
static int io_probe(struct io_ring_ctx *ctx, void __user *arg, unsigned nr_args)
{
	struct io_uring_probe *p;
	size_t size;
	int i, ret;

	size = struct_size(p, ops, nr_args);
	if (size == SIZE_MAX)
		return -EOVERFLOW;
	p = kzalloc(size, GFP_KERNEL);
	if (!p)
		return -ENOMEM;

	ret = -EFAULT;
	if (copy_from_user(p, arg, size))
		goto out;
	ret = -EINVAL;
	if (memchr_inv(p, 0, size))
		goto out;

	p->last_op = IORING_OP_LAST - 1;
	if (nr_args > IORING_OP_LAST)
		nr_args = IORING_OP_LAST;

	for (i = 0; i < nr_args; i++) {
		p->ops[i].op = i;
		if (!io_op_defs[i].not_supported)
			p->ops[i].flags = IO_URING_OP_SUPPORTED;
	}
	p->ops_len = i;

	ret = 0;
	if (copy_to_user(arg, p, size))
		ret = -EFAULT;
out:
	kfree(p);
	return ret;
}
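/*
 * Illustrative userspace sketch for the probe handler above: allocate room
 * for all opcodes, then check IO_URING_OP_SUPPORTED per entry to discover
 * what this kernel supports.
 *
 *	size_t len = sizeof(struct io_uring_probe) +
 *		     IORING_OP_LAST * sizeof(struct io_uring_probe_op);
 *	struct io_uring_probe *probe = calloc(1, len);
 *
 *	syscall(__NR_io_uring_register, ring_fd, IORING_REGISTER_PROBE,
 *		probe, IORING_OP_LAST);
 *	if (probe->ops[IORING_OP_OPENAT].flags & IO_URING_OP_SUPPORTED)
 *		openat_is_available();
 */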
static int io_register_personality(struct io_ring_ctx *ctx)
{
	const struct cred *creds = get_current_cred();
	int id;

	id = idr_alloc_cyclic(&ctx->personality_idr, (void *) creds, 1,
				USHRT_MAX, GFP_KERNEL);
	if (id < 0)
		put_cred(creds);
	return id;
}

static int io_unregister_personality(struct io_ring_ctx *ctx, unsigned id)
{
	const struct cred *old_creds;

	old_creds = idr_remove(&ctx->personality_idr, id);
	if (old_creds) {
		put_cred(old_creds);
		return 0;
	}

	return -EINVAL;
}

static bool io_register_op_must_quiesce(int op)
{
	switch (op) {
	case IORING_UNREGISTER_FILES:
	case IORING_REGISTER_FILES_UPDATE:
	case IORING_REGISTER_PROBE:
	case IORING_REGISTER_PERSONALITY:
	case IORING_UNREGISTER_PERSONALITY:
		return false;
	default:
		return true;
	}
}
static int __io_uring_register(struct io_ring_ctx *ctx, unsigned opcode,
			       void __user *arg, unsigned nr_args)
	__releases(ctx->uring_lock)
	__acquires(ctx->uring_lock)
{
	int ret;

	/*
	 * We're inside the ring mutex, if the ref is already dying, then
	 * someone else killed the ctx or is already going through
	 * io_uring_register().
	 */
	if (percpu_ref_is_dying(&ctx->refs))
		return -ENXIO;

	if (io_register_op_must_quiesce(opcode)) {
		percpu_ref_kill(&ctx->refs);

		/*
		 * Drop uring mutex before waiting for references to exit. If
		 * another thread is currently inside io_uring_enter() it might
		 * need to grab the uring_lock to make progress. If we hold it
		 * here across the drain wait, then we can deadlock. It's safe
		 * to drop the mutex here, since no new references will come in
		 * after we've killed the percpu ref.
		 */
		mutex_unlock(&ctx->uring_lock);
		ret = wait_for_completion_interruptible(&ctx->completions[0]);
		mutex_lock(&ctx->uring_lock);
		if (ret) {
			percpu_ref_resurrect(&ctx->refs);
			ret = -EINTR;
			goto out;
		}
	}

	switch (opcode) {
	case IORING_REGISTER_BUFFERS:
		ret = io_sqe_buffer_register(ctx, arg, nr_args);
		break;
	case IORING_UNREGISTER_BUFFERS:
		ret = -EINVAL;
		if (arg || nr_args)
			break;
		ret = io_sqe_buffer_unregister(ctx);
		break;
	case IORING_REGISTER_FILES:
		ret = io_sqe_files_register(ctx, arg, nr_args);
		break;
	case IORING_UNREGISTER_FILES:
		ret = -EINVAL;
		if (arg || nr_args)
			break;
		ret = io_sqe_files_unregister(ctx);
		break;
	case IORING_REGISTER_FILES_UPDATE:
		ret = io_sqe_files_update(ctx, arg, nr_args);
		break;
	case IORING_REGISTER_EVENTFD:
	case IORING_REGISTER_EVENTFD_ASYNC:
		ret = -EINVAL;
		if (nr_args != 1)
			break;
		ret = io_eventfd_register(ctx, arg);
		if (ret)
			break;
		if (opcode == IORING_REGISTER_EVENTFD_ASYNC)
			ctx->eventfd_async = 1;
		else
			ctx->eventfd_async = 0;
		break;
	case IORING_UNREGISTER_EVENTFD:
		ret = -EINVAL;
		if (arg || nr_args)
			break;
		ret = io_eventfd_unregister(ctx);
		break;
	case IORING_REGISTER_PROBE:
		ret = -EINVAL;
		if (!arg || nr_args > 256)
			break;
		ret = io_probe(ctx, arg, nr_args);
		break;
	case IORING_REGISTER_PERSONALITY:
		ret = -EINVAL;
		if (arg || nr_args)
			break;
		ret = io_register_personality(ctx);
		break;
	case IORING_UNREGISTER_PERSONALITY:
		ret = -EINVAL;
		if (arg)
			break;
		ret = io_unregister_personality(ctx, nr_args);
		break;
	default:
		ret = -EINVAL;
		break;
	}

	if (io_register_op_must_quiesce(opcode)) {
		/* bring the ctx back to life */
		percpu_ref_reinit(&ctx->refs);
out:
		reinit_completion(&ctx->completions[0]);
	}
	return ret;
}
SYSCALL_DEFINE4(io_uring_register, unsigned int, fd, unsigned int, opcode,
		void __user *, arg, unsigned int, nr_args)
{
	struct io_ring_ctx *ctx;
	long ret = -EBADF;
	struct fd f;

	f = fdget(fd);
	if (!f.file)
		return -EBADF;

	ret = -EOPNOTSUPP;
	if (f.file->f_op != &io_uring_fops)
		goto out_fput;

	ctx = f.file->private_data;

	mutex_lock(&ctx->uring_lock);
	ret = __io_uring_register(ctx, opcode, arg, nr_args);
	mutex_unlock(&ctx->uring_lock);
	trace_io_uring_register(ctx, opcode, ctx->nr_user_files, ctx->nr_user_bufs,
							ctx->cq_ev_fd != NULL, ret);
out_fput:
	fdput(f);
	return ret;
}
static int __init io_uring_init(void)
{
#define __BUILD_BUG_VERIFY_ELEMENT(stype, eoffset, etype, ename) do { \
	BUILD_BUG_ON(offsetof(stype, ename) != eoffset); \
	BUILD_BUG_ON(sizeof(etype) != sizeof_field(stype, ename)); \
} while (0)

#define BUILD_BUG_SQE_ELEM(eoffset, etype, ename) \
	__BUILD_BUG_VERIFY_ELEMENT(struct io_uring_sqe, eoffset, etype, ename)
	BUILD_BUG_ON(sizeof(struct io_uring_sqe) != 64);
	BUILD_BUG_SQE_ELEM(0,  __u8,   opcode);
	BUILD_BUG_SQE_ELEM(1,  __u8,   flags);
	BUILD_BUG_SQE_ELEM(2,  __u16,  ioprio);
	BUILD_BUG_SQE_ELEM(4,  __s32,  fd);
	BUILD_BUG_SQE_ELEM(8,  __u64,  off);
	BUILD_BUG_SQE_ELEM(8,  __u64,  addr2);
	BUILD_BUG_SQE_ELEM(16, __u64,  addr);
	BUILD_BUG_SQE_ELEM(24, __u32,  len);
	BUILD_BUG_SQE_ELEM(28,     __kernel_rwf_t, rw_flags);
	BUILD_BUG_SQE_ELEM(28, /* compat */   int, rw_flags);
	BUILD_BUG_SQE_ELEM(28, /* compat */ __u32, rw_flags);
	BUILD_BUG_SQE_ELEM(28, __u32,  fsync_flags);
	BUILD_BUG_SQE_ELEM(28, __u16,  poll_events);
	BUILD_BUG_SQE_ELEM(28, __u32,  sync_range_flags);
	BUILD_BUG_SQE_ELEM(28, __u32,  msg_flags);
	BUILD_BUG_SQE_ELEM(28, __u32,  timeout_flags);
	BUILD_BUG_SQE_ELEM(28, __u32,  accept_flags);
	BUILD_BUG_SQE_ELEM(28, __u32,  cancel_flags);
	BUILD_BUG_SQE_ELEM(28, __u32,  open_flags);
	BUILD_BUG_SQE_ELEM(28, __u32,  statx_flags);
	BUILD_BUG_SQE_ELEM(28, __u32,  fadvise_advice);
	BUILD_BUG_SQE_ELEM(32, __u64,  user_data);
	BUILD_BUG_SQE_ELEM(40, __u16,  buf_index);
	BUILD_BUG_SQE_ELEM(42, __u16,  personality);

	BUILD_BUG_ON(ARRAY_SIZE(io_op_defs) != IORING_OP_LAST);
	req_cachep = KMEM_CACHE(io_kiocb, SLAB_HWCACHE_ALIGN | SLAB_PANIC);
	return 0;
}
__initcall(io_uring_init);