// SPDX-License-Identifier: GPL-2.0
/*
 * Shared application/kernel submission and completion ring pairs, for
 * supporting fast/efficient IO.
 *
 * A note on the read/write ordering memory barriers that are matched between
 * the application and kernel side.
 *
 * After the application reads the CQ ring tail, it must use an
 * appropriate smp_rmb() to pair with the smp_wmb() the kernel uses
 * before writing the tail (using smp_load_acquire to read the tail will
 * do). It also needs a smp_mb() before updating CQ head (ordering the
 * entry load(s) with the head store), pairing with an implicit barrier
 * through a control-dependency in io_get_cqe (smp_store_release to
 * store head will do). Failure to do so could lead to reading invalid
 * CQ entries.
 *
 * Likewise, the application must use an appropriate smp_wmb() before
 * writing the SQ tail (ordering SQ entry stores with the tail store),
 * which pairs with smp_load_acquire in io_get_sqring (smp_store_release
 * to store the tail will do). And it needs a barrier ordering the SQ
 * head load before writing new SQ entries (smp_load_acquire to read
 * head will do).
 *
 * When using the SQ poll thread (IORING_SETUP_SQPOLL), the application
 * needs to check the SQ flags for IORING_SQ_NEED_WAKEUP *after*
 * updating the SQ tail; a full memory barrier smp_mb() is needed
 * between.
 *
 * Also see the examples in the liburing library:
 *
 *	git://git.kernel.dk/liburing
 *
 * io_uring also uses READ/WRITE_ONCE() for _any_ store or load that happens
 * from data shared between the kernel and application. This is done both
 * for ordering purposes, but also to ensure that once a value is loaded from
 * data that the application could potentially modify, it remains stable.
 *
 * Copyright (C) 2018-2019 Jens Axboe
 * Copyright (c) 2018-2019 Christoph Hellwig
 */
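
/*
 * A minimal sketch of the userspace side of the CQ pairing described above,
 * for illustration only (field and helper names are liburing-style
 * placeholders, not kernel API; see the liburing examples for the real
 * thing):
 *
 *	unsigned head = *cq_khead;
 *	// acquire pairs with the kernel's store-release of the CQ tail
 *	unsigned tail = io_uring_smp_load_acquire(cq_ktail);
 *	while (head != tail) {
 *		struct io_uring_cqe *cqe = &cq_cqes[head++ & cq_ring_mask];
 *		handle_cqe(cqe);
 *	}
 *	// release orders the CQE loads above before the head store, pairing
 *	// with the kernel's control dependency / acquire in io_get_cqe()
 *	io_uring_smp_store_release(cq_khead, head);
 */
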
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/syscalls.h>
#include <net/compat.h>
#include <linux/refcount.h>
#include <linux/uio.h>
#include <linux/bits.h>

#include <linux/sched/signal.h>
#include <linux/file.h>
#include <linux/fdtable.h>
#include <linux/mman.h>
#include <linux/percpu.h>
#include <linux/slab.h>
#include <linux/bvec.h>
#include <linux/net.h>
#include <linux/anon_inodes.h>
#include <linux/sched/mm.h>
#include <linux/uaccess.h>
#include <linux/nospec.h>
#include <linux/fsnotify.h>
#include <linux/fadvise.h>
#include <linux/task_work.h>
#include <linux/io_uring.h>
#include <linux/io_uring/cmd.h>
#include <linux/audit.h>
#include <linux/security.h>
#include <asm/shmparam.h>

#define CREATE_TRACE_POINTS
#include <trace/events/io_uring.h>

#include <uapi/linux/io_uring.h>

#include "uring_cmd.h"
#include "alloc_cache.h"
#define IORING_MAX_ENTRIES	32768
#define IORING_MAX_CQ_ENTRIES	(2 * IORING_MAX_ENTRIES)

#define SQE_COMMON_FLAGS (IOSQE_FIXED_FILE | IOSQE_IO_LINK | \
			  IOSQE_IO_HARDLINK | IOSQE_ASYNC)

#define SQE_VALID_FLAGS	(SQE_COMMON_FLAGS | IOSQE_BUFFER_SELECT | \
			IOSQE_IO_DRAIN | IOSQE_CQE_SKIP_SUCCESS)

#define IO_REQ_CLEAN_FLAGS (REQ_F_BUFFER_SELECTED | REQ_F_NEED_CLEANUP | \
			REQ_F_POLLED | REQ_F_INFLIGHT | REQ_F_CREDS | \
			REQ_F_ASYNC_DATA)

#define IO_REQ_CLEAN_SLOW_FLAGS (REQ_F_REFCOUNT | REQ_F_LINK | REQ_F_HARDLINK |\
				 IO_REQ_CLEAN_FLAGS)

#define IO_TCTX_REFS_CACHE_NR	(1U << 10)

#define IO_COMPL_BATCH		32
#define IO_REQ_ALLOC_BATCH	8
struct io_defer_entry {
	struct list_head	list;
	struct io_kiocb		*req;
	u32			seq;
};
/* requests with any of those set should undergo io_disarm_next() */
#define IO_DISARM_MASK (REQ_F_ARM_LTIMEOUT | REQ_F_LINK_TIMEOUT | REQ_F_FAIL)
#define IO_REQ_LINK_FLAGS (REQ_F_LINK | REQ_F_HARDLINK)

/*
 * No waiters. It's larger than any valid value of the tw counter
 * so that tests against ->cq_wait_nr would fail and skip wake_up().
 */
#define IO_CQ_WAKE_INIT		(-1U)
/* Forced wake up if there is a waiter regardless of ->cq_wait_nr */
#define IO_CQ_WAKE_FORCE	(IO_CQ_WAKE_INIT >> 1)
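/*
 * Concretely (illustrative arithmetic, assuming the usual 32-bit unsigned
 * int): IO_CQ_WAKE_INIT is UINT_MAX and IO_CQ_WAKE_FORCE is UINT_MAX / 2,
 * both far above IORING_MAX_CQ_ENTRIES, so a real waiter count can never
 * collide with either sentinel (see the BUILD_BUG_ON() in
 * io_req_local_work_add()).
 */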
static bool io_uring_try_cancel_requests(struct io_ring_ctx *ctx,
					 struct task_struct *task,
					 bool cancel_all);

static void io_queue_sqe(struct io_kiocb *req);

struct kmem_cache *req_cachep;
static struct workqueue_struct *iou_wq __ro_after_init;

static int __read_mostly sysctl_io_uring_disabled;
static int __read_mostly sysctl_io_uring_group = -1;
static struct ctl_table kernel_io_uring_disabled_table[] = {
	{
		.procname	= "io_uring_disabled",
		.data		= &sysctl_io_uring_disabled,
		.maxlen		= sizeof(sysctl_io_uring_disabled),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_minmax,
		.extra1		= SYSCTL_ZERO,
		.extra2		= SYSCTL_TWO,
	},
	{
		.procname	= "io_uring_group",
		.data		= &sysctl_io_uring_group,
		.maxlen		= sizeof(gid_t),
		.mode		= 0644,
		.proc_handler	= proc_dointvec,
	},
};
static inline unsigned int __io_cqring_events(struct io_ring_ctx *ctx)
{
	return ctx->cached_cq_tail - READ_ONCE(ctx->rings->cq.head);
}

static inline unsigned int __io_cqring_events_user(struct io_ring_ctx *ctx)
{
	return READ_ONCE(ctx->rings->cq.tail) - READ_ONCE(ctx->rings->cq.head);
}
static bool io_match_linked(struct io_kiocb *head)
{
	struct io_kiocb *req;

	io_for_each_link(req, head) {
		if (req->flags & REQ_F_INFLIGHT)
			return true;
	}
	return false;
}
/*
 * As io_match_task() but protected against racing with linked timeouts.
 * User must not hold timeout_lock.
 */
bool io_match_task_safe(struct io_kiocb *head, struct task_struct *task,
			bool cancel_all)
{
	bool matched;

	if (task && head->task != task)
		return false;
	if (cancel_all)
		return true;

	if (head->flags & REQ_F_LINK_TIMEOUT) {
		struct io_ring_ctx *ctx = head->ctx;

		/* protect against races with linked timeouts */
		spin_lock_irq(&ctx->timeout_lock);
		matched = io_match_linked(head);
		spin_unlock_irq(&ctx->timeout_lock);
	} else {
		matched = io_match_linked(head);
	}
	return matched;
}
static inline void req_fail_link_node(struct io_kiocb *req, int res)
{
	req_set_fail(req);
	io_req_set_res(req, res, 0);
}

static inline void io_req_add_to_cache(struct io_kiocb *req, struct io_ring_ctx *ctx)
{
	wq_stack_add_head(&req->comp_list, &ctx->submit_state.free_list);
}
static __cold void io_ring_ctx_ref_free(struct percpu_ref *ref)
{
	struct io_ring_ctx *ctx = container_of(ref, struct io_ring_ctx, refs);

	complete(&ctx->ref_comp);
}
static __cold void io_fallback_req_func(struct work_struct *work)
{
	struct io_ring_ctx *ctx = container_of(work, struct io_ring_ctx,
						fallback_work.work);
	struct llist_node *node = llist_del_all(&ctx->fallback_llist);
	struct io_kiocb *req, *tmp;
	struct io_tw_state ts = {};

	percpu_ref_get(&ctx->refs);
	mutex_lock(&ctx->uring_lock);
	llist_for_each_entry_safe(req, tmp, node, io_task_work.node)
		req->io_task_work.func(req, &ts);
	io_submit_flush_completions(ctx);
	mutex_unlock(&ctx->uring_lock);
	percpu_ref_put(&ctx->refs);
}
static int io_alloc_hash_table(struct io_hash_table *table, unsigned bits)
{
	unsigned hash_buckets = 1U << bits;
	size_t hash_size = hash_buckets * sizeof(table->hbs[0]);

	table->hbs = kmalloc(hash_size, GFP_KERNEL);
	if (!table->hbs)
		return -ENOMEM;
	table->hash_bits = bits;
	init_hash_table(table, hash_buckets);
	return 0;
}
276 static __cold
struct io_ring_ctx
*io_ring_ctx_alloc(struct io_uring_params
*p
)
278 struct io_ring_ctx
*ctx
;
282 ctx
= kzalloc(sizeof(*ctx
), GFP_KERNEL
);
286 xa_init(&ctx
->io_bl_xa
);
	/*
	 * Use 5 bits less than the max cq entries, that should give us around
	 * 32 entries per hash list if totally full and uniformly spread, but
	 * don't keep too many buckets to not overconsume memory.
	 */
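	/*
	 * Worked example (illustrative numbers): with p->cq_entries == 4096,
	 * ilog2(4096) - 5 == 7, i.e. 128 hash buckets and ~32 entries per
	 * bucket when the CQ is completely full; the clamp() below keeps the
	 * table between 2 and 256 buckets.
	 */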
293 hash_bits
= ilog2(p
->cq_entries
) - 5;
294 hash_bits
= clamp(hash_bits
, 1, 8);
295 if (io_alloc_hash_table(&ctx
->cancel_table
, hash_bits
))
297 if (io_alloc_hash_table(&ctx
->cancel_table_locked
, hash_bits
))
299 if (percpu_ref_init(&ctx
->refs
, io_ring_ctx_ref_free
,
303 ctx
->flags
= p
->flags
;
304 atomic_set(&ctx
->cq_wait_nr
, IO_CQ_WAKE_INIT
);
305 init_waitqueue_head(&ctx
->sqo_sq_wait
);
306 INIT_LIST_HEAD(&ctx
->sqd_list
);
307 INIT_LIST_HEAD(&ctx
->cq_overflow_list
);
308 INIT_LIST_HEAD(&ctx
->io_buffers_cache
);
309 ret
= io_alloc_cache_init(&ctx
->rsrc_node_cache
, IO_NODE_ALLOC_CACHE_MAX
,
310 sizeof(struct io_rsrc_node
));
311 ret
|= io_alloc_cache_init(&ctx
->apoll_cache
, IO_POLL_ALLOC_CACHE_MAX
,
312 sizeof(struct async_poll
));
313 ret
|= io_alloc_cache_init(&ctx
->netmsg_cache
, IO_ALLOC_CACHE_MAX
,
314 sizeof(struct io_async_msghdr
));
315 ret
|= io_alloc_cache_init(&ctx
->rw_cache
, IO_ALLOC_CACHE_MAX
,
316 sizeof(struct io_async_rw
));
317 ret
|= io_alloc_cache_init(&ctx
->uring_cache
, IO_ALLOC_CACHE_MAX
,
318 sizeof(struct uring_cache
));
319 spin_lock_init(&ctx
->msg_lock
);
320 ret
|= io_alloc_cache_init(&ctx
->msg_cache
, IO_ALLOC_CACHE_MAX
,
321 sizeof(struct io_kiocb
));
322 ret
|= io_futex_cache_init(ctx
);
325 init_completion(&ctx
->ref_comp
);
326 xa_init_flags(&ctx
->personalities
, XA_FLAGS_ALLOC1
);
327 mutex_init(&ctx
->uring_lock
);
328 init_waitqueue_head(&ctx
->cq_wait
);
329 init_waitqueue_head(&ctx
->poll_wq
);
330 init_waitqueue_head(&ctx
->rsrc_quiesce_wq
);
331 spin_lock_init(&ctx
->completion_lock
);
332 spin_lock_init(&ctx
->timeout_lock
);
333 INIT_WQ_LIST(&ctx
->iopoll_list
);
334 INIT_LIST_HEAD(&ctx
->io_buffers_comp
);
335 INIT_LIST_HEAD(&ctx
->defer_list
);
336 INIT_LIST_HEAD(&ctx
->timeout_list
);
337 INIT_LIST_HEAD(&ctx
->ltimeout_list
);
338 INIT_LIST_HEAD(&ctx
->rsrc_ref_list
);
339 init_llist_head(&ctx
->work_llist
);
340 INIT_LIST_HEAD(&ctx
->tctx_list
);
341 ctx
->submit_state
.free_list
.next
= NULL
;
342 INIT_HLIST_HEAD(&ctx
->waitid_list
);
344 INIT_HLIST_HEAD(&ctx
->futex_list
);
346 INIT_DELAYED_WORK(&ctx
->fallback_work
, io_fallback_req_func
);
347 INIT_WQ_LIST(&ctx
->submit_state
.compl_reqs
);
348 INIT_HLIST_HEAD(&ctx
->cancelable_uring_cmd
);
354 percpu_ref_exit(&ctx
->refs
);
356 io_alloc_cache_free(&ctx
->rsrc_node_cache
, kfree
);
357 io_alloc_cache_free(&ctx
->apoll_cache
, kfree
);
358 io_alloc_cache_free(&ctx
->netmsg_cache
, io_netmsg_cache_free
);
359 io_alloc_cache_free(&ctx
->rw_cache
, io_rw_cache_free
);
360 io_alloc_cache_free(&ctx
->uring_cache
, kfree
);
361 io_alloc_cache_free(&ctx
->msg_cache
, io_msg_cache_free
);
362 io_futex_cache_free(ctx
);
363 kfree(ctx
->cancel_table
.hbs
);
364 kfree(ctx
->cancel_table_locked
.hbs
);
365 xa_destroy(&ctx
->io_bl_xa
);
370 static void io_account_cq_overflow(struct io_ring_ctx
*ctx
)
372 struct io_rings
*r
= ctx
->rings
;
374 WRITE_ONCE(r
->cq_overflow
, READ_ONCE(r
->cq_overflow
) + 1);
378 static bool req_need_defer(struct io_kiocb
*req
, u32 seq
)
380 if (unlikely(req
->flags
& REQ_F_IO_DRAIN
)) {
381 struct io_ring_ctx
*ctx
= req
->ctx
;
383 return seq
+ READ_ONCE(ctx
->cq_extra
) != ctx
->cached_cq_tail
;
389 static void io_clean_op(struct io_kiocb
*req
)
391 if (req
->flags
& REQ_F_BUFFER_SELECTED
) {
392 spin_lock(&req
->ctx
->completion_lock
);
394 spin_unlock(&req
->ctx
->completion_lock
);
397 if (req
->flags
& REQ_F_NEED_CLEANUP
) {
398 const struct io_cold_def
*def
= &io_cold_defs
[req
->opcode
];
403 if ((req
->flags
& REQ_F_POLLED
) && req
->apoll
) {
404 kfree(req
->apoll
->double_poll
);
408 if (req
->flags
& REQ_F_INFLIGHT
) {
409 struct io_uring_task
*tctx
= req
->task
->io_uring
;
411 atomic_dec(&tctx
->inflight_tracked
);
413 if (req
->flags
& REQ_F_CREDS
)
414 put_cred(req
->creds
);
415 if (req
->flags
& REQ_F_ASYNC_DATA
) {
416 kfree(req
->async_data
);
417 req
->async_data
= NULL
;
419 req
->flags
&= ~IO_REQ_CLEAN_FLAGS
;
422 static inline void io_req_track_inflight(struct io_kiocb
*req
)
424 if (!(req
->flags
& REQ_F_INFLIGHT
)) {
425 req
->flags
|= REQ_F_INFLIGHT
;
426 atomic_inc(&req
->task
->io_uring
->inflight_tracked
);
430 static struct io_kiocb
*__io_prep_linked_timeout(struct io_kiocb
*req
)
432 if (WARN_ON_ONCE(!req
->link
))
435 req
->flags
&= ~REQ_F_ARM_LTIMEOUT
;
436 req
->flags
|= REQ_F_LINK_TIMEOUT
;
438 /* linked timeouts should have two refs once prep'ed */
439 io_req_set_refcount(req
);
440 __io_req_set_refcount(req
->link
, 2);
444 static inline struct io_kiocb
*io_prep_linked_timeout(struct io_kiocb
*req
)
446 if (likely(!(req
->flags
& REQ_F_ARM_LTIMEOUT
)))
448 return __io_prep_linked_timeout(req
);
451 static noinline
void __io_arm_ltimeout(struct io_kiocb
*req
)
453 io_queue_linked_timeout(__io_prep_linked_timeout(req
));
456 static inline void io_arm_ltimeout(struct io_kiocb
*req
)
458 if (unlikely(req
->flags
& REQ_F_ARM_LTIMEOUT
))
459 __io_arm_ltimeout(req
);
462 static void io_prep_async_work(struct io_kiocb
*req
)
464 const struct io_issue_def
*def
= &io_issue_defs
[req
->opcode
];
465 struct io_ring_ctx
*ctx
= req
->ctx
;
467 if (!(req
->flags
& REQ_F_CREDS
)) {
468 req
->flags
|= REQ_F_CREDS
;
469 req
->creds
= get_current_cred();
472 req
->work
.list
.next
= NULL
;
473 atomic_set(&req
->work
.flags
, 0);
474 if (req
->flags
& REQ_F_FORCE_ASYNC
)
475 atomic_or(IO_WQ_WORK_CONCURRENT
, &req
->work
.flags
);
477 if (req
->file
&& !(req
->flags
& REQ_F_FIXED_FILE
))
478 req
->flags
|= io_file_get_flags(req
->file
);
480 if (req
->file
&& (req
->flags
& REQ_F_ISREG
)) {
481 bool should_hash
= def
->hash_reg_file
;
483 /* don't serialize this request if the fs doesn't need it */
484 if (should_hash
&& (req
->file
->f_flags
& O_DIRECT
) &&
485 (req
->file
->f_op
->fop_flags
& FOP_DIO_PARALLEL_WRITE
))
487 if (should_hash
|| (ctx
->flags
& IORING_SETUP_IOPOLL
))
488 io_wq_hash_work(&req
->work
, file_inode(req
->file
));
489 } else if (!req
->file
|| !S_ISBLK(file_inode(req
->file
)->i_mode
)) {
490 if (def
->unbound_nonreg_file
)
491 atomic_or(IO_WQ_WORK_UNBOUND
, &req
->work
.flags
);
495 static void io_prep_async_link(struct io_kiocb
*req
)
497 struct io_kiocb
*cur
;
499 if (req
->flags
& REQ_F_LINK_TIMEOUT
) {
500 struct io_ring_ctx
*ctx
= req
->ctx
;
502 spin_lock_irq(&ctx
->timeout_lock
);
503 io_for_each_link(cur
, req
)
504 io_prep_async_work(cur
);
505 spin_unlock_irq(&ctx
->timeout_lock
);
507 io_for_each_link(cur
, req
)
508 io_prep_async_work(cur
);
512 static void io_queue_iowq(struct io_kiocb
*req
)
514 struct io_kiocb
*link
= io_prep_linked_timeout(req
);
515 struct io_uring_task
*tctx
= req
->task
->io_uring
;
518 BUG_ON(!tctx
->io_wq
);
520 /* init ->work of the whole link before punting */
521 io_prep_async_link(req
);
524 * Not expected to happen, but if we do have a bug where this _can_
525 * happen, catch it here and ensure the request is marked as
526 * canceled. That will make io-wq go through the usual work cancel
527 * procedure rather than attempt to run this request (or create a new
530 if (WARN_ON_ONCE(!same_thread_group(req
->task
, current
)))
531 atomic_or(IO_WQ_WORK_CANCEL
, &req
->work
.flags
);
533 trace_io_uring_queue_async_work(req
, io_wq_is_hashed(&req
->work
));
534 io_wq_enqueue(tctx
->io_wq
, &req
->work
);
536 io_queue_linked_timeout(link
);
539 static void io_req_queue_iowq_tw(struct io_kiocb
*req
, struct io_tw_state
*ts
)
544 void io_req_queue_iowq(struct io_kiocb
*req
)
546 req
->io_task_work
.func
= io_req_queue_iowq_tw
;
547 io_req_task_work_add(req
);
550 static __cold
void io_queue_deferred(struct io_ring_ctx
*ctx
)
552 while (!list_empty(&ctx
->defer_list
)) {
553 struct io_defer_entry
*de
= list_first_entry(&ctx
->defer_list
,
554 struct io_defer_entry
, list
);
556 if (req_need_defer(de
->req
, de
->seq
))
558 list_del_init(&de
->list
);
559 io_req_task_queue(de
->req
);
564 void __io_commit_cqring_flush(struct io_ring_ctx
*ctx
)
566 if (ctx
->poll_activated
)
567 io_poll_wq_wake(ctx
);
568 if (ctx
->off_timeout_used
)
569 io_flush_timeouts(ctx
);
570 if (ctx
->drain_active
) {
571 spin_lock(&ctx
->completion_lock
);
572 io_queue_deferred(ctx
);
573 spin_unlock(&ctx
->completion_lock
);
576 io_eventfd_flush_signal(ctx
);
579 static inline void __io_cq_lock(struct io_ring_ctx
*ctx
)
581 if (!ctx
->lockless_cq
)
582 spin_lock(&ctx
->completion_lock
);
585 static inline void io_cq_lock(struct io_ring_ctx
*ctx
)
586 __acquires(ctx
->completion_lock
)
588 spin_lock(&ctx
->completion_lock
);
591 static inline void __io_cq_unlock_post(struct io_ring_ctx
*ctx
)
593 io_commit_cqring(ctx
);
594 if (!ctx
->task_complete
) {
595 if (!ctx
->lockless_cq
)
596 spin_unlock(&ctx
->completion_lock
);
597 /* IOPOLL rings only need to wake up if it's also SQPOLL */
598 if (!ctx
->syscall_iopoll
)
601 io_commit_cqring_flush(ctx
);
604 static void io_cq_unlock_post(struct io_ring_ctx
*ctx
)
605 __releases(ctx
->completion_lock
)
607 io_commit_cqring(ctx
);
608 spin_unlock(&ctx
->completion_lock
);
610 io_commit_cqring_flush(ctx
);
613 static void __io_cqring_overflow_flush(struct io_ring_ctx
*ctx
, bool dying
)
615 size_t cqe_size
= sizeof(struct io_uring_cqe
);
617 lockdep_assert_held(&ctx
->uring_lock
);
619 /* don't abort if we're dying, entries must get freed */
620 if (!dying
&& __io_cqring_events(ctx
) == ctx
->cq_entries
)
623 if (ctx
->flags
& IORING_SETUP_CQE32
)
627 while (!list_empty(&ctx
->cq_overflow_list
)) {
628 struct io_uring_cqe
*cqe
;
629 struct io_overflow_cqe
*ocqe
;
631 ocqe
= list_first_entry(&ctx
->cq_overflow_list
,
632 struct io_overflow_cqe
, list
);
635 if (!io_get_cqe_overflow(ctx
, &cqe
, true))
637 memcpy(cqe
, &ocqe
->cqe
, cqe_size
);
639 list_del(&ocqe
->list
);
		/*
		 * For silly syzbot cases that deliberately overflow by huge
		 * amounts, check if we need to resched and drop and
		 * reacquire the locks if so. Nothing real would ever hit this.
		 * Ideally we'd have a non-posting unlock for this, but it's
		 * hard to justify for a case that never happens in practice.
		 */
649 if (need_resched()) {
650 io_cq_unlock_post(ctx
);
651 mutex_unlock(&ctx
->uring_lock
);
653 mutex_lock(&ctx
->uring_lock
);
658 if (list_empty(&ctx
->cq_overflow_list
)) {
659 clear_bit(IO_CHECK_CQ_OVERFLOW_BIT
, &ctx
->check_cq
);
660 atomic_andnot(IORING_SQ_CQ_OVERFLOW
, &ctx
->rings
->sq_flags
);
662 io_cq_unlock_post(ctx
);
665 static void io_cqring_overflow_kill(struct io_ring_ctx
*ctx
)
668 __io_cqring_overflow_flush(ctx
, true);
671 static void io_cqring_do_overflow_flush(struct io_ring_ctx
*ctx
)
673 mutex_lock(&ctx
->uring_lock
);
674 __io_cqring_overflow_flush(ctx
, false);
675 mutex_unlock(&ctx
->uring_lock
);
678 /* can be called by any task */
679 static void io_put_task_remote(struct task_struct
*task
)
681 struct io_uring_task
*tctx
= task
->io_uring
;
683 percpu_counter_sub(&tctx
->inflight
, 1);
684 if (unlikely(atomic_read(&tctx
->in_cancel
)))
685 wake_up(&tctx
->wait
);
686 put_task_struct(task
);
689 /* used by a task to put its own references */
690 static void io_put_task_local(struct task_struct
*task
)
692 task
->io_uring
->cached_refs
++;
/* must be called shortly after putting a request */
696 static inline void io_put_task(struct task_struct
*task
)
698 if (likely(task
== current
))
699 io_put_task_local(task
);
701 io_put_task_remote(task
);
704 void io_task_refs_refill(struct io_uring_task
*tctx
)
706 unsigned int refill
= -tctx
->cached_refs
+ IO_TCTX_REFS_CACHE_NR
;
708 percpu_counter_add(&tctx
->inflight
, refill
);
709 refcount_add(refill
, ¤t
->usage
);
710 tctx
->cached_refs
+= refill
;
713 static __cold
void io_uring_drop_tctx_refs(struct task_struct
*task
)
715 struct io_uring_task
*tctx
= task
->io_uring
;
716 unsigned int refs
= tctx
->cached_refs
;
719 tctx
->cached_refs
= 0;
720 percpu_counter_sub(&tctx
->inflight
, refs
);
721 put_task_struct_many(task
, refs
);
725 static bool io_cqring_event_overflow(struct io_ring_ctx
*ctx
, u64 user_data
,
726 s32 res
, u32 cflags
, u64 extra1
, u64 extra2
)
728 struct io_overflow_cqe
*ocqe
;
729 size_t ocq_size
= sizeof(struct io_overflow_cqe
);
730 bool is_cqe32
= (ctx
->flags
& IORING_SETUP_CQE32
);
732 lockdep_assert_held(&ctx
->completion_lock
);
735 ocq_size
+= sizeof(struct io_uring_cqe
);
737 ocqe
= kmalloc(ocq_size
, GFP_ATOMIC
| __GFP_ACCOUNT
);
738 trace_io_uring_cqe_overflow(ctx
, user_data
, res
, cflags
, ocqe
);
741 * If we're in ring overflow flush mode, or in task cancel mode,
742 * or cannot allocate an overflow entry, then we need to drop it
745 io_account_cq_overflow(ctx
);
746 set_bit(IO_CHECK_CQ_DROPPED_BIT
, &ctx
->check_cq
);
749 if (list_empty(&ctx
->cq_overflow_list
)) {
750 set_bit(IO_CHECK_CQ_OVERFLOW_BIT
, &ctx
->check_cq
);
751 atomic_or(IORING_SQ_CQ_OVERFLOW
, &ctx
->rings
->sq_flags
);
754 ocqe
->cqe
.user_data
= user_data
;
756 ocqe
->cqe
.flags
= cflags
;
758 ocqe
->cqe
.big_cqe
[0] = extra1
;
759 ocqe
->cqe
.big_cqe
[1] = extra2
;
761 list_add_tail(&ocqe
->list
, &ctx
->cq_overflow_list
);
765 static void io_req_cqe_overflow(struct io_kiocb
*req
)
767 io_cqring_event_overflow(req
->ctx
, req
->cqe
.user_data
,
768 req
->cqe
.res
, req
->cqe
.flags
,
769 req
->big_cqe
.extra1
, req
->big_cqe
.extra2
);
770 memset(&req
->big_cqe
, 0, sizeof(req
->big_cqe
));
774 * writes to the cq entry need to come after reading head; the
775 * control dependency is enough as we're using WRITE_ONCE to
778 bool io_cqe_cache_refill(struct io_ring_ctx
*ctx
, bool overflow
)
780 struct io_rings
*rings
= ctx
->rings
;
781 unsigned int off
= ctx
->cached_cq_tail
& (ctx
->cq_entries
- 1);
782 unsigned int free
, queued
, len
;
785 * Posting into the CQ when there are pending overflowed CQEs may break
786 * ordering guarantees, which will affect links, F_MORE users and more.
787 * Force overflow the completion.
789 if (!overflow
&& (ctx
->check_cq
& BIT(IO_CHECK_CQ_OVERFLOW_BIT
)))
792 /* userspace may cheat modifying the tail, be safe and do min */
793 queued
= min(__io_cqring_events(ctx
), ctx
->cq_entries
);
794 free
= ctx
->cq_entries
- queued
;
795 /* we need a contiguous range, limit based on the current array offset */
796 len
= min(free
, ctx
->cq_entries
- off
);
800 if (ctx
->flags
& IORING_SETUP_CQE32
) {
805 ctx
->cqe_cached
= &rings
->cqes
[off
];
806 ctx
->cqe_sentinel
= ctx
->cqe_cached
+ len
;
810 static bool io_fill_cqe_aux(struct io_ring_ctx
*ctx
, u64 user_data
, s32 res
,
813 struct io_uring_cqe
*cqe
;
818 * If we can't get a cq entry, userspace overflowed the
819 * submission (by quite a lot). Increment the overflow count in
822 if (likely(io_get_cqe(ctx
, &cqe
))) {
823 trace_io_uring_complete(ctx
, NULL
, user_data
, res
, cflags
, 0, 0);
825 WRITE_ONCE(cqe
->user_data
, user_data
);
826 WRITE_ONCE(cqe
->res
, res
);
827 WRITE_ONCE(cqe
->flags
, cflags
);
829 if (ctx
->flags
& IORING_SETUP_CQE32
) {
830 WRITE_ONCE(cqe
->big_cqe
[0], 0);
831 WRITE_ONCE(cqe
->big_cqe
[1], 0);
838 static bool __io_post_aux_cqe(struct io_ring_ctx
*ctx
, u64 user_data
, s32 res
,
843 filled
= io_fill_cqe_aux(ctx
, user_data
, res
, cflags
);
845 filled
= io_cqring_event_overflow(ctx
, user_data
, res
, cflags
, 0, 0);
850 bool io_post_aux_cqe(struct io_ring_ctx
*ctx
, u64 user_data
, s32 res
, u32 cflags
)
855 filled
= __io_post_aux_cqe(ctx
, user_data
, res
, cflags
);
856 io_cq_unlock_post(ctx
);
/*
 * Must be called from inline task_work so we know a flush will happen later,
 * and obviously with ctx->uring_lock held (tw always has that).
 */
864 void io_add_aux_cqe(struct io_ring_ctx
*ctx
, u64 user_data
, s32 res
, u32 cflags
)
866 if (!io_fill_cqe_aux(ctx
, user_data
, res
, cflags
)) {
867 spin_lock(&ctx
->completion_lock
);
868 io_cqring_event_overflow(ctx
, user_data
, res
, cflags
, 0, 0);
869 spin_unlock(&ctx
->completion_lock
);
871 ctx
->submit_state
.cq_flush
= true;
875 * A helper for multishot requests posting additional CQEs.
876 * Should only be used from a task_work including IO_URING_F_MULTISHOT.
878 bool io_req_post_cqe(struct io_kiocb
*req
, s32 res
, u32 cflags
)
880 struct io_ring_ctx
*ctx
= req
->ctx
;
883 lockdep_assert(!io_wq_current_is_worker());
884 lockdep_assert_held(&ctx
->uring_lock
);
887 posted
= io_fill_cqe_aux(ctx
, req
->cqe
.user_data
, res
, cflags
);
888 ctx
->submit_state
.cq_flush
= true;
889 __io_cq_unlock_post(ctx
);
893 static void io_req_complete_post(struct io_kiocb
*req
, unsigned issue_flags
)
895 struct io_ring_ctx
*ctx
= req
->ctx
;
898 * All execution paths but io-wq use the deferred completions by
899 * passing IO_URING_F_COMPLETE_DEFER and thus should not end up here.
901 if (WARN_ON_ONCE(!(issue_flags
& IO_URING_F_IOWQ
)))
905 * Handle special CQ sync cases via task_work. DEFER_TASKRUN requires
906 * the submitter task context, IOPOLL protects with uring_lock.
908 if (ctx
->task_complete
|| (ctx
->flags
& IORING_SETUP_IOPOLL
)) {
909 req
->io_task_work
.func
= io_req_task_complete
;
910 io_req_task_work_add(req
);
915 if (!(req
->flags
& REQ_F_CQE_SKIP
)) {
916 if (!io_fill_cqe_req(ctx
, req
))
917 io_req_cqe_overflow(req
);
919 io_cq_unlock_post(ctx
);
922 * We don't free the request here because we know it's called from
923 * io-wq only, which holds a reference, so it cannot be the last put.
928 void io_req_defer_failed(struct io_kiocb
*req
, s32 res
)
929 __must_hold(&ctx
->uring_lock
)
931 const struct io_cold_def
*def
= &io_cold_defs
[req
->opcode
];
933 lockdep_assert_held(&req
->ctx
->uring_lock
);
936 io_req_set_res(req
, res
, io_put_kbuf(req
, res
, IO_URING_F_UNLOCKED
));
939 io_req_complete_defer(req
);
943 * Don't initialise the fields below on every allocation, but do that in
944 * advance and keep them valid across allocations.
946 static void io_preinit_req(struct io_kiocb
*req
, struct io_ring_ctx
*ctx
)
950 req
->async_data
= NULL
;
951 /* not necessary, but safer to zero */
952 memset(&req
->cqe
, 0, sizeof(req
->cqe
));
953 memset(&req
->big_cqe
, 0, sizeof(req
->big_cqe
));
957 * A request might get retired back into the request caches even before opcode
958 * handlers and io_issue_sqe() are done with it, e.g. inline completion path.
959 * Because of that, io_alloc_req() should be called only under ->uring_lock
960 * and with extra caution to not get a request that is still worked on.
962 __cold
bool __io_alloc_req_refill(struct io_ring_ctx
*ctx
)
963 __must_hold(&ctx
->uring_lock
)
965 gfp_t gfp
= GFP_KERNEL
| __GFP_NOWARN
;
966 void *reqs
[IO_REQ_ALLOC_BATCH
];
969 ret
= kmem_cache_alloc_bulk(req_cachep
, gfp
, ARRAY_SIZE(reqs
), reqs
);
972 * Bulk alloc is all-or-nothing. If we fail to get a batch,
973 * retry single alloc to be on the safe side.
975 if (unlikely(ret
<= 0)) {
976 reqs
[0] = kmem_cache_alloc(req_cachep
, gfp
);
982 percpu_ref_get_many(&ctx
->refs
, ret
);
984 struct io_kiocb
*req
= reqs
[ret
];
986 io_preinit_req(req
, ctx
);
987 io_req_add_to_cache(req
, ctx
);
992 __cold
void io_free_req(struct io_kiocb
*req
)
994 /* refs were already put, restore them for io_req_task_complete() */
995 req
->flags
&= ~REQ_F_REFCOUNT
;
996 /* we only want to free it, don't post CQEs */
997 req
->flags
|= REQ_F_CQE_SKIP
;
998 req
->io_task_work
.func
= io_req_task_complete
;
999 io_req_task_work_add(req
);
1002 static void __io_req_find_next_prep(struct io_kiocb
*req
)
1004 struct io_ring_ctx
*ctx
= req
->ctx
;
1006 spin_lock(&ctx
->completion_lock
);
1007 io_disarm_next(req
);
1008 spin_unlock(&ctx
->completion_lock
);
1011 static inline struct io_kiocb
*io_req_find_next(struct io_kiocb
*req
)
1013 struct io_kiocb
*nxt
;
1016 * If LINK is set, we have dependent requests in this chain. If we
1017 * didn't fail this request, queue the first one up, moving any other
1018 * dependencies to the next request. In case of failure, fail the rest
1021 if (unlikely(req
->flags
& IO_DISARM_MASK
))
1022 __io_req_find_next_prep(req
);
1028 static void ctx_flush_and_put(struct io_ring_ctx
*ctx
, struct io_tw_state
*ts
)
1032 if (ctx
->flags
& IORING_SETUP_TASKRUN_FLAG
)
1033 atomic_andnot(IORING_SQ_TASKRUN
, &ctx
->rings
->sq_flags
);
1035 io_submit_flush_completions(ctx
);
1036 mutex_unlock(&ctx
->uring_lock
);
1037 percpu_ref_put(&ctx
->refs
);
1041 * Run queued task_work, returning the number of entries processed in *count.
1042 * If more entries than max_entries are available, stop processing once this
1043 * is reached and return the rest of the list.
1045 struct llist_node
*io_handle_tw_list(struct llist_node
*node
,
1046 unsigned int *count
,
1047 unsigned int max_entries
)
1049 struct io_ring_ctx
*ctx
= NULL
;
1050 struct io_tw_state ts
= { };
1053 struct llist_node
*next
= node
->next
;
1054 struct io_kiocb
*req
= container_of(node
, struct io_kiocb
,
1057 if (req
->ctx
!= ctx
) {
1058 ctx_flush_and_put(ctx
, &ts
);
1060 mutex_lock(&ctx
->uring_lock
);
1061 percpu_ref_get(&ctx
->refs
);
1063 INDIRECT_CALL_2(req
->io_task_work
.func
,
1064 io_poll_task_func
, io_req_rw_complete
,
1068 if (unlikely(need_resched())) {
1069 ctx_flush_and_put(ctx
, &ts
);
1073 } while (node
&& *count
< max_entries
);
1075 ctx_flush_and_put(ctx
, &ts
);
1080 * io_llist_xchg - swap all entries in a lock-less list
1081 * @head: the head of lock-less list to delete all entries
1082 * @new: new entry as the head of the list
1084 * If list is empty, return NULL, otherwise, return the pointer to the first entry.
1085 * The order of entries returned is from the newest to the oldest added one.
1087 static inline struct llist_node
*io_llist_xchg(struct llist_head
*head
,
1088 struct llist_node
*new)
1090 return xchg(&head
->first
, new);
1093 static __cold
void io_fallback_tw(struct io_uring_task
*tctx
, bool sync
)
1095 struct llist_node
*node
= llist_del_all(&tctx
->task_list
);
1096 struct io_ring_ctx
*last_ctx
= NULL
;
1097 struct io_kiocb
*req
;
1100 req
= container_of(node
, struct io_kiocb
, io_task_work
.node
);
1102 if (sync
&& last_ctx
!= req
->ctx
) {
1104 flush_delayed_work(&last_ctx
->fallback_work
);
1105 percpu_ref_put(&last_ctx
->refs
);
1107 last_ctx
= req
->ctx
;
1108 percpu_ref_get(&last_ctx
->refs
);
1110 if (llist_add(&req
->io_task_work
.node
,
1111 &req
->ctx
->fallback_llist
))
1112 schedule_delayed_work(&req
->ctx
->fallback_work
, 1);
1116 flush_delayed_work(&last_ctx
->fallback_work
);
1117 percpu_ref_put(&last_ctx
->refs
);
1121 struct llist_node
*tctx_task_work_run(struct io_uring_task
*tctx
,
1122 unsigned int max_entries
,
1123 unsigned int *count
)
1125 struct llist_node
*node
;
1127 if (unlikely(current
->flags
& PF_EXITING
)) {
1128 io_fallback_tw(tctx
, true);
1132 node
= llist_del_all(&tctx
->task_list
);
1134 node
= llist_reverse_order(node
);
1135 node
= io_handle_tw_list(node
, count
, max_entries
);
1138 /* relaxed read is enough as only the task itself sets ->in_cancel */
1139 if (unlikely(atomic_read(&tctx
->in_cancel
)))
1140 io_uring_drop_tctx_refs(current
);
1142 trace_io_uring_task_work_run(tctx
, *count
);
1146 void tctx_task_work(struct callback_head
*cb
)
1148 struct io_uring_task
*tctx
;
1149 struct llist_node
*ret
;
1150 unsigned int count
= 0;
1152 tctx
= container_of(cb
, struct io_uring_task
, task_work
);
1153 ret
= tctx_task_work_run(tctx
, UINT_MAX
, &count
);
1158 static inline void io_req_local_work_add(struct io_kiocb
*req
,
1159 struct io_ring_ctx
*ctx
,
1162 unsigned nr_wait
, nr_tw
, nr_tw_prev
;
1163 struct llist_node
*head
;
1165 /* See comment above IO_CQ_WAKE_INIT */
1166 BUILD_BUG_ON(IO_CQ_WAKE_FORCE
<= IORING_MAX_CQ_ENTRIES
);
	/*
	 * We don't know how many requests there are in the link and whether
	 * they can even be queued lazily, fall back to non-lazy.
	 */
1172 if (req
->flags
& (REQ_F_LINK
| REQ_F_HARDLINK
))
1173 flags
&= ~IOU_F_TWQ_LAZY_WAKE
;
1177 head
= READ_ONCE(ctx
->work_llist
.first
);
1181 struct io_kiocb
*first_req
= container_of(head
,
1185 * Might be executed at any moment, rely on
1186 * SLAB_TYPESAFE_BY_RCU to keep it alive.
1188 nr_tw_prev
= READ_ONCE(first_req
->nr_tw
);
1192 * Theoretically, it can overflow, but that's fine as one of
1193 * previous adds should've tried to wake the task.
1195 nr_tw
= nr_tw_prev
+ 1;
1196 if (!(flags
& IOU_F_TWQ_LAZY_WAKE
))
1197 nr_tw
= IO_CQ_WAKE_FORCE
;
1200 req
->io_task_work
.node
.next
= head
;
1201 } while (!try_cmpxchg(&ctx
->work_llist
.first
, &head
,
1202 &req
->io_task_work
.node
));
	/*
	 * cmpxchg implies a full barrier, which pairs with the barrier
	 * in set_current_state() on the io_cqring_wait() side. It's used
	 * to ensure that either we see updated ->cq_wait_nr, or waiters
	 * going to sleep will observe the work added to the list, which
	 * is similar to the wait/wake task state sync.
	 */
1213 if (ctx
->flags
& IORING_SETUP_TASKRUN_FLAG
)
1214 atomic_or(IORING_SQ_TASKRUN
, &ctx
->rings
->sq_flags
);
1216 io_eventfd_signal(ctx
);
1219 nr_wait
= atomic_read(&ctx
->cq_wait_nr
);
1220 /* not enough or no one is waiting */
1221 if (nr_tw
< nr_wait
)
1223 /* the previous add has already woken it up */
1224 if (nr_tw_prev
>= nr_wait
)
1226 wake_up_state(ctx
->submitter_task
, TASK_INTERRUPTIBLE
);
1229 static void io_req_normal_work_add(struct io_kiocb
*req
)
1231 struct io_uring_task
*tctx
= req
->task
->io_uring
;
1232 struct io_ring_ctx
*ctx
= req
->ctx
;
1234 /* task_work already pending, we're done */
1235 if (!llist_add(&req
->io_task_work
.node
, &tctx
->task_list
))
1238 if (ctx
->flags
& IORING_SETUP_TASKRUN_FLAG
)
1239 atomic_or(IORING_SQ_TASKRUN
, &ctx
->rings
->sq_flags
);
1241 /* SQPOLL doesn't need the task_work added, it'll run it itself */
1242 if (ctx
->flags
& IORING_SETUP_SQPOLL
) {
1243 struct io_sq_data
*sqd
= ctx
->sq_data
;
1246 __set_notify_signal(sqd
->thread
);
1250 if (likely(!task_work_add(req
->task
, &tctx
->task_work
, ctx
->notify_method
)))
1253 io_fallback_tw(tctx
, false);
1256 void __io_req_task_work_add(struct io_kiocb
*req
, unsigned flags
)
1258 if (req
->ctx
->flags
& IORING_SETUP_DEFER_TASKRUN
)
1259 io_req_local_work_add(req
, req
->ctx
, flags
);
1261 io_req_normal_work_add(req
);
1264 void io_req_task_work_add_remote(struct io_kiocb
*req
, struct io_ring_ctx
*ctx
,
1267 if (WARN_ON_ONCE(!(ctx
->flags
& IORING_SETUP_DEFER_TASKRUN
)))
1269 io_req_local_work_add(req
, ctx
, flags
);
1272 static void __cold
io_move_task_work_from_local(struct io_ring_ctx
*ctx
)
1274 struct llist_node
*node
;
1276 node
= llist_del_all(&ctx
->work_llist
);
1278 struct io_kiocb
*req
= container_of(node
, struct io_kiocb
,
1282 io_req_normal_work_add(req
);
1286 static bool io_run_local_work_continue(struct io_ring_ctx
*ctx
, int events
,
1289 if (llist_empty(&ctx
->work_llist
))
1291 if (events
< min_events
)
1293 if (ctx
->flags
& IORING_SETUP_TASKRUN_FLAG
)
1294 atomic_or(IORING_SQ_TASKRUN
, &ctx
->rings
->sq_flags
);
1298 static int __io_run_local_work(struct io_ring_ctx
*ctx
, struct io_tw_state
*ts
,
1301 struct llist_node
*node
;
1302 unsigned int loops
= 0;
1305 if (WARN_ON_ONCE(ctx
->submitter_task
!= current
))
1307 if (ctx
->flags
& IORING_SETUP_TASKRUN_FLAG
)
1308 atomic_andnot(IORING_SQ_TASKRUN
, &ctx
->rings
->sq_flags
);
1311 * llists are in reverse order, flip it back the right way before
1312 * running the pending items.
1314 node
= llist_reverse_order(io_llist_xchg(&ctx
->work_llist
, NULL
));
1316 struct llist_node
*next
= node
->next
;
1317 struct io_kiocb
*req
= container_of(node
, struct io_kiocb
,
1319 INDIRECT_CALL_2(req
->io_task_work
.func
,
1320 io_poll_task_func
, io_req_rw_complete
,
1327 if (io_run_local_work_continue(ctx
, ret
, min_events
))
1329 io_submit_flush_completions(ctx
);
1330 if (io_run_local_work_continue(ctx
, ret
, min_events
))
1333 trace_io_uring_local_work_run(ctx
, ret
, loops
);
1337 static inline int io_run_local_work_locked(struct io_ring_ctx
*ctx
,
1340 struct io_tw_state ts
= {};
1342 if (llist_empty(&ctx
->work_llist
))
1344 return __io_run_local_work(ctx
, &ts
, min_events
);
1347 static int io_run_local_work(struct io_ring_ctx
*ctx
, int min_events
)
1349 struct io_tw_state ts
= {};
1352 mutex_lock(&ctx
->uring_lock
);
1353 ret
= __io_run_local_work(ctx
, &ts
, min_events
);
1354 mutex_unlock(&ctx
->uring_lock
);
1358 static void io_req_task_cancel(struct io_kiocb
*req
, struct io_tw_state
*ts
)
1360 io_tw_lock(req
->ctx
, ts
);
1361 io_req_defer_failed(req
, req
->cqe
.res
);
1364 void io_req_task_submit(struct io_kiocb
*req
, struct io_tw_state
*ts
)
1366 io_tw_lock(req
->ctx
, ts
);
1367 /* req->task == current here, checking PF_EXITING is safe */
1368 if (unlikely(req
->task
->flags
& PF_EXITING
))
1369 io_req_defer_failed(req
, -EFAULT
);
1370 else if (req
->flags
& REQ_F_FORCE_ASYNC
)
1376 void io_req_task_queue_fail(struct io_kiocb
*req
, int ret
)
1378 io_req_set_res(req
, ret
, 0);
1379 req
->io_task_work
.func
= io_req_task_cancel
;
1380 io_req_task_work_add(req
);
1383 void io_req_task_queue(struct io_kiocb
*req
)
1385 req
->io_task_work
.func
= io_req_task_submit
;
1386 io_req_task_work_add(req
);
1389 void io_queue_next(struct io_kiocb
*req
)
1391 struct io_kiocb
*nxt
= io_req_find_next(req
);
1394 io_req_task_queue(nxt
);
1397 static void io_free_batch_list(struct io_ring_ctx
*ctx
,
1398 struct io_wq_work_node
*node
)
1399 __must_hold(&ctx
->uring_lock
)
1402 struct io_kiocb
*req
= container_of(node
, struct io_kiocb
,
1405 if (unlikely(req
->flags
& IO_REQ_CLEAN_SLOW_FLAGS
)) {
1406 if (req
->flags
& REQ_F_REFCOUNT
) {
1407 node
= req
->comp_list
.next
;
1408 if (!req_ref_put_and_test(req
))
1411 if ((req
->flags
& REQ_F_POLLED
) && req
->apoll
) {
1412 struct async_poll
*apoll
= req
->apoll
;
1414 if (apoll
->double_poll
)
1415 kfree(apoll
->double_poll
);
1416 if (!io_alloc_cache_put(&ctx
->apoll_cache
, apoll
))
1418 req
->flags
&= ~REQ_F_POLLED
;
1420 if (req
->flags
& IO_REQ_LINK_FLAGS
)
1422 if (unlikely(req
->flags
& IO_REQ_CLEAN_FLAGS
))
1426 io_put_rsrc_node(ctx
, req
->rsrc_node
);
1427 io_put_task(req
->task
);
1429 node
= req
->comp_list
.next
;
1430 io_req_add_to_cache(req
, ctx
);
1434 void __io_submit_flush_completions(struct io_ring_ctx
*ctx
)
1435 __must_hold(&ctx
->uring_lock
)
1437 struct io_submit_state
*state
= &ctx
->submit_state
;
1438 struct io_wq_work_node
*node
;
1441 __wq_list_for_each(node
, &state
->compl_reqs
) {
1442 struct io_kiocb
*req
= container_of(node
, struct io_kiocb
,
1445 if (!(req
->flags
& REQ_F_CQE_SKIP
) &&
1446 unlikely(!io_fill_cqe_req(ctx
, req
))) {
1447 if (ctx
->lockless_cq
) {
1448 spin_lock(&ctx
->completion_lock
);
1449 io_req_cqe_overflow(req
);
1450 spin_unlock(&ctx
->completion_lock
);
1452 io_req_cqe_overflow(req
);
1456 __io_cq_unlock_post(ctx
);
1458 if (!wq_list_empty(&state
->compl_reqs
)) {
1459 io_free_batch_list(ctx
, state
->compl_reqs
.first
);
1460 INIT_WQ_LIST(&state
->compl_reqs
);
1462 ctx
->submit_state
.cq_flush
= false;
1465 static unsigned io_cqring_events(struct io_ring_ctx
*ctx
)
1467 /* See comment at the top of this file */
1469 return __io_cqring_events(ctx
);
1473 * We can't just wait for polled events to come to us, we have to actively
1474 * find and complete them.
1476 static __cold
void io_iopoll_try_reap_events(struct io_ring_ctx
*ctx
)
1478 if (!(ctx
->flags
& IORING_SETUP_IOPOLL
))
1481 mutex_lock(&ctx
->uring_lock
);
1482 while (!wq_list_empty(&ctx
->iopoll_list
)) {
1483 /* let it sleep and repeat later if can't complete a request */
1484 if (io_do_iopoll(ctx
, true) == 0)
1487 * Ensure we allow local-to-the-cpu processing to take place,
1488 * in this case we need to ensure that we reap all events.
1489 * Also let task_work, etc. to progress by releasing the mutex
1491 if (need_resched()) {
1492 mutex_unlock(&ctx
->uring_lock
);
1494 mutex_lock(&ctx
->uring_lock
);
1497 mutex_unlock(&ctx
->uring_lock
);
1500 static int io_iopoll_check(struct io_ring_ctx
*ctx
, long min
)
1502 unsigned int nr_events
= 0;
1503 unsigned long check_cq
;
1505 lockdep_assert_held(&ctx
->uring_lock
);
1507 if (!io_allowed_run_tw(ctx
))
1510 check_cq
= READ_ONCE(ctx
->check_cq
);
1511 if (unlikely(check_cq
)) {
1512 if (check_cq
& BIT(IO_CHECK_CQ_OVERFLOW_BIT
))
1513 __io_cqring_overflow_flush(ctx
, false);
1515 * Similarly do not spin if we have not informed the user of any
1518 if (check_cq
& BIT(IO_CHECK_CQ_DROPPED_BIT
))
1522 * Don't enter poll loop if we already have events pending.
1523 * If we do, we can potentially be spinning for commands that
1524 * already triggered a CQE (eg in error).
1526 if (io_cqring_events(ctx
))
1533 * If a submit got punted to a workqueue, we can have the
1534 * application entering polling for a command before it gets
1535 * issued. That app will hold the uring_lock for the duration
1536 * of the poll right here, so we need to take a breather every
1537 * now and then to ensure that the issue has a chance to add
1538 * the poll to the issued list. Otherwise we can spin here
1539 * forever, while the workqueue is stuck trying to acquire the
1542 if (wq_list_empty(&ctx
->iopoll_list
) ||
1543 io_task_work_pending(ctx
)) {
1544 u32 tail
= ctx
->cached_cq_tail
;
1546 (void) io_run_local_work_locked(ctx
, min
);
1548 if (task_work_pending(current
) ||
1549 wq_list_empty(&ctx
->iopoll_list
)) {
1550 mutex_unlock(&ctx
->uring_lock
);
1552 mutex_lock(&ctx
->uring_lock
);
1554 /* some requests don't go through iopoll_list */
1555 if (tail
!= ctx
->cached_cq_tail
||
1556 wq_list_empty(&ctx
->iopoll_list
))
1559 ret
= io_do_iopoll(ctx
, !min
);
1560 if (unlikely(ret
< 0))
1563 if (task_sigpending(current
))
1569 } while (nr_events
< min
);
1574 void io_req_task_complete(struct io_kiocb
*req
, struct io_tw_state
*ts
)
1576 io_req_complete_defer(req
);
/*
 * After the iocb has been issued, it's safe to be found on the poll list.
 * Adding the kiocb to the list AFTER submission ensures that we don't
 * find it from an io_do_iopoll() thread before the issuer is done
 * accessing the kiocb cookie.
 */
1585 static void io_iopoll_req_issued(struct io_kiocb
*req
, unsigned int issue_flags
)
1587 struct io_ring_ctx
*ctx
= req
->ctx
;
1588 const bool needs_lock
= issue_flags
& IO_URING_F_UNLOCKED
;
1590 /* workqueue context doesn't hold uring_lock, grab it now */
1591 if (unlikely(needs_lock
))
1592 mutex_lock(&ctx
->uring_lock
);
1595 * Track whether we have multiple files in our lists. This will impact
1596 * how we do polling eventually, not spinning if we're on potentially
1597 * different devices.
1599 if (wq_list_empty(&ctx
->iopoll_list
)) {
1600 ctx
->poll_multi_queue
= false;
1601 } else if (!ctx
->poll_multi_queue
) {
1602 struct io_kiocb
*list_req
;
1604 list_req
= container_of(ctx
->iopoll_list
.first
, struct io_kiocb
,
1606 if (list_req
->file
!= req
->file
)
1607 ctx
->poll_multi_queue
= true;
1611 * For fast devices, IO may have already completed. If it has, add
1612 * it to the front so we find it first.
1614 if (READ_ONCE(req
->iopoll_completed
))
1615 wq_list_add_head(&req
->comp_list
, &ctx
->iopoll_list
);
1617 wq_list_add_tail(&req
->comp_list
, &ctx
->iopoll_list
);
1619 if (unlikely(needs_lock
)) {
		/*
		 * If IORING_SETUP_SQPOLL is enabled, sqes are either handled
		 * in sq thread task context or in io worker task context. If
		 * current task context is sq thread, we don't need to check
		 * whether we should wake up the sq thread.
		 */
1626 if ((ctx
->flags
& IORING_SETUP_SQPOLL
) &&
1627 wq_has_sleeper(&ctx
->sq_data
->wait
))
1628 wake_up(&ctx
->sq_data
->wait
);
1630 mutex_unlock(&ctx
->uring_lock
);
1634 io_req_flags_t
io_file_get_flags(struct file
*file
)
1636 io_req_flags_t res
= 0;
1638 if (S_ISREG(file_inode(file
)->i_mode
))
1640 if ((file
->f_flags
& O_NONBLOCK
) || (file
->f_mode
& FMODE_NOWAIT
))
1641 res
|= REQ_F_SUPPORT_NOWAIT
;
1645 bool io_alloc_async_data(struct io_kiocb
*req
)
1647 const struct io_issue_def
*def
= &io_issue_defs
[req
->opcode
];
1649 WARN_ON_ONCE(!def
->async_size
);
1650 req
->async_data
= kmalloc(def
->async_size
, GFP_KERNEL
);
1651 if (req
->async_data
) {
1652 req
->flags
|= REQ_F_ASYNC_DATA
;
1658 static u32
io_get_sequence(struct io_kiocb
*req
)
1660 u32 seq
= req
->ctx
->cached_sq_head
;
1661 struct io_kiocb
*cur
;
1663 /* need original cached_sq_head, but it was increased for each req */
1664 io_for_each_link(cur
, req
)
1669 static __cold
void io_drain_req(struct io_kiocb
*req
)
1670 __must_hold(&ctx
->uring_lock
)
1672 struct io_ring_ctx
*ctx
= req
->ctx
;
1673 struct io_defer_entry
*de
;
1675 u32 seq
= io_get_sequence(req
);
1677 /* Still need defer if there is pending req in defer list. */
1678 spin_lock(&ctx
->completion_lock
);
1679 if (!req_need_defer(req
, seq
) && list_empty_careful(&ctx
->defer_list
)) {
1680 spin_unlock(&ctx
->completion_lock
);
1682 ctx
->drain_active
= false;
1683 io_req_task_queue(req
);
1686 spin_unlock(&ctx
->completion_lock
);
1688 io_prep_async_link(req
);
1689 de
= kmalloc(sizeof(*de
), GFP_KERNEL
);
1692 io_req_defer_failed(req
, ret
);
1696 spin_lock(&ctx
->completion_lock
);
1697 if (!req_need_defer(req
, seq
) && list_empty(&ctx
->defer_list
)) {
1698 spin_unlock(&ctx
->completion_lock
);
1703 trace_io_uring_defer(req
);
1706 list_add_tail(&de
->list
, &ctx
->defer_list
);
1707 spin_unlock(&ctx
->completion_lock
);
1710 static bool io_assign_file(struct io_kiocb
*req
, const struct io_issue_def
*def
,
1711 unsigned int issue_flags
)
1713 if (req
->file
|| !def
->needs_file
)
1716 if (req
->flags
& REQ_F_FIXED_FILE
)
1717 req
->file
= io_file_get_fixed(req
, req
->cqe
.fd
, issue_flags
);
1719 req
->file
= io_file_get_normal(req
, req
->cqe
.fd
);
1724 static int io_issue_sqe(struct io_kiocb
*req
, unsigned int issue_flags
)
1726 const struct io_issue_def
*def
= &io_issue_defs
[req
->opcode
];
1727 const struct cred
*creds
= NULL
;
1730 if (unlikely(!io_assign_file(req
, def
, issue_flags
)))
1733 if (unlikely((req
->flags
& REQ_F_CREDS
) && req
->creds
!= current_cred()))
1734 creds
= override_creds(req
->creds
);
1736 if (!def
->audit_skip
)
1737 audit_uring_entry(req
->opcode
);
1739 ret
= def
->issue(req
, issue_flags
);
1741 if (!def
->audit_skip
)
1742 audit_uring_exit(!ret
, ret
);
1745 revert_creds(creds
);
1747 if (ret
== IOU_OK
) {
1748 if (issue_flags
& IO_URING_F_COMPLETE_DEFER
)
1749 io_req_complete_defer(req
);
1751 io_req_complete_post(req
, issue_flags
);
1756 if (ret
== IOU_ISSUE_SKIP_COMPLETE
) {
1758 io_arm_ltimeout(req
);
1760 /* If the op doesn't have a file, we're not polling for it */
1761 if ((req
->ctx
->flags
& IORING_SETUP_IOPOLL
) && def
->iopoll_queue
)
1762 io_iopoll_req_issued(req
, issue_flags
);
1767 int io_poll_issue(struct io_kiocb
*req
, struct io_tw_state
*ts
)
1769 io_tw_lock(req
->ctx
, ts
);
1770 return io_issue_sqe(req
, IO_URING_F_NONBLOCK
|IO_URING_F_MULTISHOT
|
1771 IO_URING_F_COMPLETE_DEFER
);
1774 struct io_wq_work
*io_wq_free_work(struct io_wq_work
*work
)
1776 struct io_kiocb
*req
= container_of(work
, struct io_kiocb
, work
);
1777 struct io_kiocb
*nxt
= NULL
;
1779 if (req_ref_put_and_test(req
)) {
1780 if (req
->flags
& IO_REQ_LINK_FLAGS
)
1781 nxt
= io_req_find_next(req
);
1784 return nxt
? &nxt
->work
: NULL
;
1787 void io_wq_submit_work(struct io_wq_work
*work
)
1789 struct io_kiocb
*req
= container_of(work
, struct io_kiocb
, work
);
1790 const struct io_issue_def
*def
= &io_issue_defs
[req
->opcode
];
1791 unsigned int issue_flags
= IO_URING_F_UNLOCKED
| IO_URING_F_IOWQ
;
1792 bool needs_poll
= false;
1793 int ret
= 0, err
= -ECANCELED
;
1795 /* one will be dropped by ->io_wq_free_work() after returning to io-wq */
1796 if (!(req
->flags
& REQ_F_REFCOUNT
))
1797 __io_req_set_refcount(req
, 2);
1801 io_arm_ltimeout(req
);
1803 /* either cancelled or io-wq is dying, so don't touch tctx->iowq */
1804 if (atomic_read(&work
->flags
) & IO_WQ_WORK_CANCEL
) {
1806 io_req_task_queue_fail(req
, err
);
1809 if (!io_assign_file(req
, def
, issue_flags
)) {
1811 atomic_or(IO_WQ_WORK_CANCEL
, &work
->flags
);
	/*
	 * If DEFER_TASKRUN is set, it's only allowed to post CQEs from the
	 * submitter task context. Final request completions are handed to the
	 * right context, however this is not the case for auxiliary CQEs,
	 * which are the main means of operation for multishot requests.
	 * Don't allow any multishot execution from io-wq. It's more
	 * restrictive than necessary and also cleaner.
	 */
1823 if (req
->flags
& REQ_F_APOLL_MULTISHOT
) {
1825 if (!io_file_can_poll(req
))
1827 if (req
->file
->f_flags
& O_NONBLOCK
||
1828 req
->file
->f_mode
& FMODE_NOWAIT
) {
1830 if (io_arm_poll_handler(req
, issue_flags
) != IO_APOLL_OK
)
1834 req
->flags
&= ~REQ_F_APOLL_MULTISHOT
;
1838 if (req
->flags
& REQ_F_FORCE_ASYNC
) {
1839 bool opcode_poll
= def
->pollin
|| def
->pollout
;
1841 if (opcode_poll
&& io_file_can_poll(req
)) {
1843 issue_flags
|= IO_URING_F_NONBLOCK
;
1848 ret
= io_issue_sqe(req
, issue_flags
);
1853 * If REQ_F_NOWAIT is set, then don't wait or retry with
1854 * poll. -EAGAIN is final for that case.
1856 if (req
->flags
& REQ_F_NOWAIT
)
1860 * We can get EAGAIN for iopolled IO even though we're
1861 * forcing a sync submission from here, since we can't
1862 * wait for request slots on the block side.
1865 if (!(req
->ctx
->flags
& IORING_SETUP_IOPOLL
))
1867 if (io_wq_worker_stopped())
1873 if (io_arm_poll_handler(req
, issue_flags
) == IO_APOLL_OK
)
1875 /* aborted or ready, in either case retry blocking */
1877 issue_flags
&= ~IO_URING_F_NONBLOCK
;
1880 /* avoid locking problems by failing it from a clean context */
1882 io_req_task_queue_fail(req
, ret
);
1885 inline struct file
*io_file_get_fixed(struct io_kiocb
*req
, int fd
,
1886 unsigned int issue_flags
)
1888 struct io_ring_ctx
*ctx
= req
->ctx
;
1889 struct io_fixed_file
*slot
;
1890 struct file
*file
= NULL
;
1892 io_ring_submit_lock(ctx
, issue_flags
);
1894 if (unlikely((unsigned int)fd
>= ctx
->nr_user_files
))
1896 fd
= array_index_nospec(fd
, ctx
->nr_user_files
);
1897 slot
= io_fixed_file_slot(&ctx
->file_table
, fd
);
1898 if (!req
->rsrc_node
)
1899 __io_req_set_rsrc_node(req
, ctx
);
1900 req
->flags
|= io_slot_flags(slot
);
1901 file
= io_slot_file(slot
);
1903 io_ring_submit_unlock(ctx
, issue_flags
);
1907 struct file
*io_file_get_normal(struct io_kiocb
*req
, int fd
)
1909 struct file
*file
= fget(fd
);
1911 trace_io_uring_file_get(req
, fd
);
1913 /* we don't allow fixed io_uring files */
1914 if (file
&& io_is_uring_fops(file
))
1915 io_req_track_inflight(req
);
1919 static void io_queue_async(struct io_kiocb
*req
, int ret
)
1920 __must_hold(&req
->ctx
->uring_lock
)
1922 struct io_kiocb
*linked_timeout
;
1924 if (ret
!= -EAGAIN
|| (req
->flags
& REQ_F_NOWAIT
)) {
1925 io_req_defer_failed(req
, ret
);
1929 linked_timeout
= io_prep_linked_timeout(req
);
1931 switch (io_arm_poll_handler(req
, 0)) {
1932 case IO_APOLL_READY
:
1933 io_kbuf_recycle(req
, 0);
1934 io_req_task_queue(req
);
1936 case IO_APOLL_ABORTED
:
1937 io_kbuf_recycle(req
, 0);
1945 io_queue_linked_timeout(linked_timeout
);
1948 static inline void io_queue_sqe(struct io_kiocb
*req
)
1949 __must_hold(&req
->ctx
->uring_lock
)
1953 ret
= io_issue_sqe(req
, IO_URING_F_NONBLOCK
|IO_URING_F_COMPLETE_DEFER
);
1956 * We async punt it if the file wasn't marked NOWAIT, or if the file
1957 * doesn't support non-blocking read/write attempts
1960 io_queue_async(req
, ret
);
1963 static void io_queue_sqe_fallback(struct io_kiocb
*req
)
1964 __must_hold(&req
->ctx
->uring_lock
)
1966 if (unlikely(req
->flags
& REQ_F_FAIL
)) {
1968 * We don't submit, fail them all, for that replace hardlinks
1969 * with normal links. Extra REQ_F_LINK is tolerated.
1971 req
->flags
&= ~REQ_F_HARDLINK
;
1972 req
->flags
|= REQ_F_LINK
;
1973 io_req_defer_failed(req
, req
->cqe
.res
);
1975 if (unlikely(req
->ctx
->drain_active
))
1983 * Check SQE restrictions (opcode and flags).
1985 * Returns 'true' if SQE is allowed, 'false' otherwise.
1987 static inline bool io_check_restriction(struct io_ring_ctx
*ctx
,
1988 struct io_kiocb
*req
,
1989 unsigned int sqe_flags
)
1991 if (!test_bit(req
->opcode
, ctx
->restrictions
.sqe_op
))
1994 if ((sqe_flags
& ctx
->restrictions
.sqe_flags_required
) !=
1995 ctx
->restrictions
.sqe_flags_required
)
1998 if (sqe_flags
& ~(ctx
->restrictions
.sqe_flags_allowed
|
1999 ctx
->restrictions
.sqe_flags_required
))
2005 static void io_init_req_drain(struct io_kiocb
*req
)
2007 struct io_ring_ctx
*ctx
= req
->ctx
;
2008 struct io_kiocb
*head
= ctx
->submit_state
.link
.head
;
2010 ctx
->drain_active
= true;
2013 * If we need to drain a request in the middle of a link, drain
2014 * the head request and the next request/link after the current
2015 * link. Considering sequential execution of links,
2016 * REQ_F_IO_DRAIN will be maintained for every request of our
2019 head
->flags
|= REQ_F_IO_DRAIN
| REQ_F_FORCE_ASYNC
;
2020 ctx
->drain_next
= true;
2024 static __cold
int io_init_fail_req(struct io_kiocb
*req
, int err
)
2026 /* ensure per-opcode data is cleared if we fail before prep */
2027 memset(&req
->cmd
.data
, 0, sizeof(req
->cmd
.data
));
2031 static int io_init_req(struct io_ring_ctx
*ctx
, struct io_kiocb
*req
,
2032 const struct io_uring_sqe
*sqe
)
2033 __must_hold(&ctx
->uring_lock
)
2035 const struct io_issue_def
*def
;
2036 unsigned int sqe_flags
;
2040 /* req is partially pre-initialised, see io_preinit_req() */
2041 req
->opcode
= opcode
= READ_ONCE(sqe
->opcode
);
2042 /* same numerical values with corresponding REQ_F_*, safe to copy */
2043 sqe_flags
= READ_ONCE(sqe
->flags
);
2044 req
->flags
= (__force io_req_flags_t
) sqe_flags
;
2045 req
->cqe
.user_data
= READ_ONCE(sqe
->user_data
);
2047 req
->rsrc_node
= NULL
;
2048 req
->task
= current
;
2049 req
->cancel_seq_set
= false;
2051 if (unlikely(opcode
>= IORING_OP_LAST
)) {
2053 return io_init_fail_req(req
, -EINVAL
);
2055 def
= &io_issue_defs
[opcode
];
2056 if (unlikely(sqe_flags
& ~SQE_COMMON_FLAGS
)) {
2057 /* enforce forwards compatibility on users */
2058 if (sqe_flags
& ~SQE_VALID_FLAGS
)
2059 return io_init_fail_req(req
, -EINVAL
);
2060 if (sqe_flags
& IOSQE_BUFFER_SELECT
) {
2061 if (!def
->buffer_select
)
2062 return io_init_fail_req(req
, -EOPNOTSUPP
);
2063 req
->buf_index
= READ_ONCE(sqe
->buf_group
);
2065 if (sqe_flags
& IOSQE_CQE_SKIP_SUCCESS
)
2066 ctx
->drain_disabled
= true;
2067 if (sqe_flags
& IOSQE_IO_DRAIN
) {
2068 if (ctx
->drain_disabled
)
2069 return io_init_fail_req(req
, -EOPNOTSUPP
);
2070 io_init_req_drain(req
);
2073 if (unlikely(ctx
->restricted
|| ctx
->drain_active
|| ctx
->drain_next
)) {
2074 if (ctx
->restricted
&& !io_check_restriction(ctx
, req
, sqe_flags
))
2075 return io_init_fail_req(req
, -EACCES
);
2076 /* knock it to the slow queue path, will be drained there */
2077 if (ctx
->drain_active
)
2078 req
->flags
|= REQ_F_FORCE_ASYNC
;
2079 /* if there is no link, we're at "next" request and need to drain */
2080 if (unlikely(ctx
->drain_next
) && !ctx
->submit_state
.link
.head
) {
2081 ctx
->drain_next
= false;
2082 ctx
->drain_active
= true;
2083 req
->flags
|= REQ_F_IO_DRAIN
| REQ_F_FORCE_ASYNC
;
2087 if (!def
->ioprio
&& sqe
->ioprio
)
2088 return io_init_fail_req(req
, -EINVAL
);
2089 if (!def
->iopoll
&& (ctx
->flags
& IORING_SETUP_IOPOLL
))
2090 return io_init_fail_req(req
, -EINVAL
);
2092 if (def
->needs_file
) {
2093 struct io_submit_state
*state
= &ctx
->submit_state
;
2095 req
->cqe
.fd
= READ_ONCE(sqe
->fd
);
2098 * Plug now if we have more than 2 IO left after this, and the
2099 * target is potentially a read/write to block based storage.
2101 if (state
->need_plug
&& def
->plug
) {
2102 state
->plug_started
= true;
2103 state
->need_plug
= false;
2104 blk_start_plug_nr_ios(&state
->plug
, state
->submit_nr
);
2108 personality
= READ_ONCE(sqe
->personality
);
2112 req
->creds
= xa_load(&ctx
->personalities
, personality
);
2114 return io_init_fail_req(req
, -EINVAL
);
2115 get_cred(req
->creds
);
2116 ret
= security_uring_override_creds(req
->creds
);
2118 put_cred(req
->creds
);
2119 return io_init_fail_req(req
, ret
);
2121 req
->flags
|= REQ_F_CREDS
;
2124 return def
->prep(req
, sqe
);
2127 static __cold
int io_submit_fail_init(const struct io_uring_sqe
*sqe
,
2128 struct io_kiocb
*req
, int ret
)
2130 struct io_ring_ctx
*ctx
= req
->ctx
;
2131 struct io_submit_link
*link
= &ctx
->submit_state
.link
;
2132 struct io_kiocb
*head
= link
->head
;
2134 trace_io_uring_req_failed(sqe
, req
, ret
);
2137 * Avoid breaking links in the middle as it renders links with SQPOLL
2138 * unusable. Instead of failing eagerly, continue assembling the link if
2139 * applicable and mark the head with REQ_F_FAIL. The link flushing code
2140 * should find the flag and handle the rest.
2142 req_fail_link_node(req
, ret
);
2143 if (head
&& !(head
->flags
& REQ_F_FAIL
))
2144 req_fail_link_node(head
, -ECANCELED
);
2146 if (!(req
->flags
& IO_REQ_LINK_FLAGS
)) {
2148 link
->last
->link
= req
;
2152 io_queue_sqe_fallback(req
);
2157 link
->last
->link
= req
;
static inline int io_submit_sqe(struct io_ring_ctx *ctx, struct io_kiocb *req,
				const struct io_uring_sqe *sqe)
	__must_hold(&ctx->uring_lock)
{
	struct io_submit_link *link = &ctx->submit_state.link;
	int ret;

	ret = io_init_req(ctx, req, sqe);
	if (unlikely(ret))
		return io_submit_fail_init(sqe, req, ret);

	trace_io_uring_submit_req(req);

	/*
	 * If we already have a head request, queue this one for async
	 * submittal once the head completes. If we don't have a head but
	 * IOSQE_IO_LINK is set in the sqe, start a new head. This one will be
	 * submitted sync once the chain is complete. If none of those
	 * conditions are true (normal request), then just queue it.
	 */
	if (unlikely(link->head)) {
		trace_io_uring_link(req, link->last);
		link->last->link = req;
		link->last = req;

		if (req->flags & IO_REQ_LINK_FLAGS)
			return 0;
		/* last request of the link, flush it */
		req = link->head;
		link->head = NULL;
		if (req->flags & (REQ_F_FORCE_ASYNC | REQ_F_FAIL))
			goto fallback;

	} else if (unlikely(req->flags & (IO_REQ_LINK_FLAGS |
					  REQ_F_FORCE_ASYNC | REQ_F_FAIL))) {
		if (req->flags & IO_REQ_LINK_FLAGS) {
			link->head = req;
			link->last = req;
			return 0;
		} else {
fallback:
			io_queue_sqe_fallback(req);
		}
		return 0;
	}

	io_queue_sqe(req);
	return 0;
}
/*
 * Batched submission is done, ensure local IO is flushed out.
 */
static void io_submit_state_end(struct io_ring_ctx *ctx)
{
	struct io_submit_state *state = &ctx->submit_state;

	if (unlikely(state->link.head))
		io_queue_sqe_fallback(state->link.head);
	/* flush only after queuing links as they can generate completions */
	io_submit_flush_completions(ctx);
	if (state->plug_started)
		blk_finish_plug(&state->plug);
}
/*
 * Start submission side cache.
 */
static void io_submit_state_start(struct io_submit_state *state,
				  unsigned int max_ios)
{
	state->plug_started = false;
	state->need_plug = max_ios > 2;
	state->submit_nr = max_ios;
	/* set only head, no need to init link_last in advance */
	state->link.head = NULL;
}
static void io_commit_sqring(struct io_ring_ctx *ctx)
{
	struct io_rings *rings = ctx->rings;

	/*
	 * Ensure any loads from the SQEs are done at this point,
	 * since once we write the new head, the application could
	 * write new data to them.
	 */
	smp_store_release(&rings->sq.head, ctx->cached_sq_head);
}
/*
 * Fetch an sqe, if one is available. Note this returns a pointer to memory
 * that is mapped by userspace. This means that care needs to be taken to
 * ensure that reads are stable, as we cannot rely on userspace always
 * being a good citizen. If members of the sqe are validated and then later
 * used, it's important that those reads are done through READ_ONCE() to
 * prevent a re-load down the line.
 */
static bool io_get_sqe(struct io_ring_ctx *ctx, const struct io_uring_sqe **sqe)
{
	unsigned mask = ctx->sq_entries - 1;
	unsigned head = ctx->cached_sq_head++ & mask;

	if (!(ctx->flags & IORING_SETUP_NO_SQARRAY)) {
		head = READ_ONCE(ctx->sq_array[head]);
		if (unlikely(head >= ctx->sq_entries)) {
			/* drop invalid entries */
			spin_lock(&ctx->completion_lock);
			ctx->cq_extra--;
			spin_unlock(&ctx->completion_lock);
			WRITE_ONCE(ctx->rings->sq_dropped,
				   READ_ONCE(ctx->rings->sq_dropped) + 1);
			return false;
		}
	}

	/*
	 * The cached sq head (or cq tail) serves two purposes:
	 *
	 * 1) allows us to batch the cost of updating the user visible
	 *    head updates.
	 * 2) allows the kernel side to track the head on its own, even
	 *    though the application is the one updating it.
	 */

	/* double index for 128-byte SQEs, twice as long */
	if (ctx->flags & IORING_SETUP_SQE128)
		head <<= 1;
	*sqe = &ctx->sq_sqes[head];
	return true;
}
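/*
 * Example (userspace sketch, not part of this kernel file): the application
 * side of the protocol that io_get_sqe()/io_commit_sqring() pair with. The
 * application fills the SQE slot at its cached tail, writes the index into
 * the SQ array, and publishes the new tail with a release store so the
 * kernel's acquire load of the tail observes fully written entries. Names
 * (sq_tail, sq_mask, sq_array, sqes) are illustrative; real code derives
 * these pointers from the mmap'ed struct io_sqring_offsets fields.
 */
#if 0
#include <linux/io_uring.h>

static void example_sq_push(unsigned *sq_tail, const unsigned *sq_mask,
			    unsigned *sq_array, struct io_uring_sqe *sqes,
			    const struct io_uring_sqe *src)
{
	unsigned tail = *sq_tail;		/* app owns the tail, plain load is fine */
	unsigned index = tail & *sq_mask;

	sqes[index] = *src;			/* fill the entry first */
	sq_array[index] = index;		/* then the indirection array */
	/* release: order the SQE/array stores before the tail update */
	__atomic_store_n(sq_tail, tail + 1, __ATOMIC_RELEASE);
}
#endif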
int io_submit_sqes(struct io_ring_ctx *ctx, unsigned int nr)
	__must_hold(&ctx->uring_lock)
{
	unsigned int entries = io_sqring_entries(ctx);
	unsigned int left;
	int ret;

	if (unlikely(!entries))
		return 0;
	/* make sure SQ entry isn't read before tail */
	ret = left = min(nr, entries);
	io_get_task_refs(left);
	io_submit_state_start(&ctx->submit_state, left);

	do {
		const struct io_uring_sqe *sqe;
		struct io_kiocb *req;

		if (unlikely(!io_alloc_req(ctx, &req)))
			break;
		if (unlikely(!io_get_sqe(ctx, &sqe))) {
			io_req_add_to_cache(req, ctx);
			break;
		}

		/*
		 * Continue submitting even for sqe failure if the
		 * ring was setup with IORING_SETUP_SUBMIT_ALL
		 */
		if (unlikely(io_submit_sqe(ctx, req, sqe)) &&
		    !(ctx->flags & IORING_SETUP_SUBMIT_ALL)) {
			left--;
			break;
		}
	} while (--left);

	if (unlikely(left)) {
		ret -= left;
		/* try again if it submitted nothing and can't allocate a req */
		if (!ret && io_req_cache_empty(ctx))
			ret = -EAGAIN;
		current->io_uring->cached_refs += left;
	}

	io_submit_state_end(ctx);
	/* Commit SQ ring head once we've consumed and submitted all SQEs */
	io_commit_sqring(ctx);
	return ret;
}
static int io_wake_function(struct wait_queue_entry *curr, unsigned int mode,
			    int wake_flags, void *key)
{
	struct io_wait_queue *iowq = container_of(curr, struct io_wait_queue, wq);

	/*
	 * Cannot safely flush overflowed CQEs from here, ensure we wake up
	 * the task, and the next invocation will do it.
	 */
	if (io_should_wake(iowq) || io_has_work(iowq->ctx))
		return autoremove_wake_function(curr, mode, wake_flags, key);
	return -1;
}
int io_run_task_work_sig(struct io_ring_ctx *ctx)
{
	if (!llist_empty(&ctx->work_llist)) {
		__set_current_state(TASK_RUNNING);
		if (io_run_local_work(ctx, INT_MAX) > 0)
			return 0;
	}
	if (io_run_task_work() > 0)
		return 0;
	if (task_sigpending(current))
		return -EINTR;
	return 0;
}
static bool current_pending_io(void)
{
	struct io_uring_task *tctx = current->io_uring;

	if (!tctx)
		return false;
	return percpu_counter_read_positive(&tctx->inflight);
}
static enum hrtimer_restart io_cqring_timer_wakeup(struct hrtimer *timer)
{
	struct io_wait_queue *iowq = container_of(timer, struct io_wait_queue, t);

	WRITE_ONCE(iowq->hit_timeout, 1);
	iowq->min_timeout = 0;
	wake_up_process(iowq->wq.private);
	return HRTIMER_NORESTART;
}
/*
 * Doing min_timeout portion. If we saw any timeouts, events, or have work,
 * wake up. If not, and we have a normal timeout, switch to that and keep
 * sleeping.
 */
static enum hrtimer_restart io_cqring_min_timer_wakeup(struct hrtimer *timer)
{
	struct io_wait_queue *iowq = container_of(timer, struct io_wait_queue, t);
	struct io_ring_ctx *ctx = iowq->ctx;

	/* no general timeout, or shorter (or equal), we are done */
	if (iowq->timeout == KTIME_MAX ||
	    ktime_compare(iowq->min_timeout, iowq->timeout) >= 0)
		goto out_wake;
	/* work we may need to run, wake function will see if we need to wake */
	if (io_has_work(ctx))
		goto out_wake;
	/* got events since we started waiting, min timeout is done */
	if (iowq->cq_min_tail != READ_ONCE(ctx->rings->cq.tail))
		goto out_wake;
	/* if we have any events and min timeout expired, we're done */
	if (io_cqring_events(ctx))
		goto out_wake;

	/*
	 * If using deferred task_work running and application is waiting on
	 * more than one request, ensure we reset it now where we are switching
	 * to normal sleeps. Any request completion post min_wait should wake
	 * the task and return.
	 */
	if (ctx->flags & IORING_SETUP_DEFER_TASKRUN) {
		atomic_set(&ctx->cq_wait_nr, 1);
		smp_mb();
		if (!llist_empty(&ctx->work_llist))
			goto out_wake;
	}

	iowq->t.function = io_cqring_timer_wakeup;
	hrtimer_set_expires(timer, iowq->timeout);
	return HRTIMER_RESTART;
out_wake:
	return io_cqring_timer_wakeup(timer);
}
static int io_cqring_schedule_timeout(struct io_wait_queue *iowq,
				      clockid_t clock_id, ktime_t start_time)
{
	ktime_t timeout;

	hrtimer_init_on_stack(&iowq->t, clock_id, HRTIMER_MODE_ABS);
	if (iowq->min_timeout) {
		timeout = ktime_add_ns(iowq->min_timeout, start_time);
		iowq->t.function = io_cqring_min_timer_wakeup;
	} else {
		timeout = iowq->timeout;
		iowq->t.function = io_cqring_timer_wakeup;
	}

	hrtimer_set_expires_range_ns(&iowq->t, timeout, 0);
	hrtimer_start_expires(&iowq->t, HRTIMER_MODE_ABS);

	if (!READ_ONCE(iowq->hit_timeout))
		schedule();

	hrtimer_cancel(&iowq->t);
	destroy_hrtimer_on_stack(&iowq->t);
	__set_current_state(TASK_RUNNING);

	return READ_ONCE(iowq->hit_timeout) ? -ETIME : 0;
}
static int __io_cqring_wait_schedule(struct io_ring_ctx *ctx,
				     struct io_wait_queue *iowq,
				     ktime_t start_time)
{
	int ret = 0;

	/*
	 * Mark us as being in io_wait if we have pending requests, so cpufreq
	 * can take into account that the task is waiting for IO - turns out
	 * to be important for low QD IO.
	 */
	if (current_pending_io())
		current->in_iowait = 1;
	if (iowq->timeout != KTIME_MAX || iowq->min_timeout)
		ret = io_cqring_schedule_timeout(iowq, ctx->clockid, start_time);
	else
		schedule();
	current->in_iowait = 0;
	return ret;
}
/* If this returns > 0, the caller should retry */
static inline int io_cqring_wait_schedule(struct io_ring_ctx *ctx,
					  struct io_wait_queue *iowq,
					  ktime_t start_time)
{
	if (unlikely(READ_ONCE(ctx->check_cq)))
		return 1;
	if (unlikely(!llist_empty(&ctx->work_llist)))
		return 1;
	if (unlikely(task_work_pending(current)))
		return 1;
	if (unlikely(task_sigpending(current)))
		return -EINTR;
	if (unlikely(io_should_wake(iowq)))
		return 0;

	return __io_cqring_wait_schedule(ctx, iowq, start_time);
}
struct ext_arg {
	size_t argsz;
	struct __kernel_timespec __user *ts;
	const sigset_t __user *sig;
	ktime_t min_time;
};
/*
 * Wait until events become available, if we don't already have some. The
 * application must reap them itself, as they reside on the shared cq ring.
 */
static int io_cqring_wait(struct io_ring_ctx *ctx, int min_events, u32 flags,
			  struct ext_arg *ext_arg)
{
	struct io_wait_queue iowq;
	struct io_rings *rings = ctx->rings;
	ktime_t start_time;
	int ret;

	if (!io_allowed_run_tw(ctx))
		return -EEXIST;
	if (!llist_empty(&ctx->work_llist))
		io_run_local_work(ctx, min_events);
	io_run_task_work();

	if (unlikely(test_bit(IO_CHECK_CQ_OVERFLOW_BIT, &ctx->check_cq)))
		io_cqring_do_overflow_flush(ctx);
	if (__io_cqring_events_user(ctx) >= min_events)
		return 0;

	init_waitqueue_func_entry(&iowq.wq, io_wake_function);
	iowq.wq.private = current;
	INIT_LIST_HEAD(&iowq.wq.entry);
	iowq.ctx = ctx;
	iowq.cq_tail = READ_ONCE(ctx->rings->cq.head) + min_events;
	iowq.cq_min_tail = READ_ONCE(ctx->rings->cq.tail);
	iowq.nr_timeouts = atomic_read(&ctx->cq_timeouts);
	iowq.hit_timeout = 0;
	iowq.min_timeout = ext_arg->min_time;
	iowq.timeout = KTIME_MAX;
	start_time = io_get_time(ctx);

	if (ext_arg->ts) {
		struct timespec64 ts;

		if (get_timespec64(&ts, ext_arg->ts))
			return -EFAULT;

		iowq.timeout = timespec64_to_ktime(ts);
		if (!(flags & IORING_ENTER_ABS_TIMER))
			iowq.timeout = ktime_add(iowq.timeout, start_time);
	}

	if (ext_arg->sig) {
#ifdef CONFIG_COMPAT
		if (in_compat_syscall())
			ret = set_compat_user_sigmask((const compat_sigset_t __user *)ext_arg->sig,
						      ext_arg->argsz);
		else
#endif
			ret = set_user_sigmask(ext_arg->sig, ext_arg->argsz);

		if (ret)
			return ret;
	}

	io_napi_busy_loop(ctx, &iowq);

	trace_io_uring_cqring_wait(ctx, min_events);
	do {
		unsigned long check_cq;
		int nr_wait;

		/* if min timeout has been hit, don't reset wait count */
		if (!iowq.hit_timeout)
			nr_wait = (int) iowq.cq_tail -
					READ_ONCE(ctx->rings->cq.tail);
		else
			nr_wait = 1;

		if (ctx->flags & IORING_SETUP_DEFER_TASKRUN) {
			atomic_set(&ctx->cq_wait_nr, nr_wait);
			set_current_state(TASK_INTERRUPTIBLE);
		} else {
			prepare_to_wait_exclusive(&ctx->cq_wait, &iowq.wq,
							TASK_INTERRUPTIBLE);
		}

		ret = io_cqring_wait_schedule(ctx, &iowq, start_time);
		__set_current_state(TASK_RUNNING);
		atomic_set(&ctx->cq_wait_nr, IO_CQ_WAKE_INIT);

		/*
		 * Run task_work after scheduling and before io_should_wake().
		 * If we got woken because of task_work being processed, run it
		 * now rather than let the caller do another wait loop.
		 */
		if (!llist_empty(&ctx->work_llist))
			io_run_local_work(ctx, nr_wait);
		io_run_task_work();

		/*
		 * Non-local task_work will be run on exit to userspace, but
		 * if we're using DEFER_TASKRUN, then we could have waited
		 * with a timeout for a number of requests. If the timeout
		 * hits, we could have some requests ready to process. Ensure
		 * this break is _after_ we have run task_work, to avoid
		 * deferring running potentially pending requests until the
		 * next time we wait for events.
		 */
		if (ret < 0)
			break;

		check_cq = READ_ONCE(ctx->check_cq);
		if (unlikely(check_cq)) {
			/* let the caller flush overflows, retry */
			if (check_cq & BIT(IO_CHECK_CQ_OVERFLOW_BIT))
				io_cqring_do_overflow_flush(ctx);
			if (check_cq & BIT(IO_CHECK_CQ_DROPPED_BIT)) {
				ret = -EBADR;
				break;
			}
		}

		if (io_should_wake(&iowq)) {
			ret = 0;
			break;
		}
		cond_resched();
	} while (1);

	if (!(ctx->flags & IORING_SETUP_DEFER_TASKRUN))
		finish_wait(&ctx->cq_wait, &iowq.wq);
	restore_saved_sigmask_unless(ret == -EINTR);

	return READ_ONCE(rings->cq.head) == READ_ONCE(rings->cq.tail) ? ret : 0;
}
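/*
 * Example (userspace sketch, not part of this kernel file): reaping
 * completions after io_uring_enter() returns, the application-side
 * counterpart of the wait above. The CQ tail is read with acquire
 * semantics (pairing with the kernel's release store), entries are
 * consumed, and the new head is published with a release store. Names
 * (cq_head, cq_tail, cq_mask, cqes) are illustrative for the mmap'ed
 * ring fields.
 */
#if 0
#include <linux/io_uring.h>

static unsigned example_cq_reap(unsigned *cq_head, const unsigned *cq_tail,
				const unsigned *cq_mask,
				const struct io_uring_cqe *cqes)
{
	unsigned head = *cq_head;
	unsigned tail = __atomic_load_n(cq_tail, __ATOMIC_ACQUIRE);
	unsigned seen = 0;

	while (head != tail) {
		const struct io_uring_cqe *cqe = &cqes[head & *cq_mask];

		(void)cqe;	/* cqe->user_data / cqe->res would be handled here */
		head++;
		seen++;
	}
	/* release: entry loads must not be reordered after the head store */
	__atomic_store_n(cq_head, head, __ATOMIC_RELEASE);
	return seen;
}
#endif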
static void *io_rings_map(struct io_ring_ctx *ctx, unsigned long uaddr,
			  size_t size)
{
	return __io_uaddr_map(&ctx->ring_pages, &ctx->n_ring_pages, uaddr,
				size);
}

static void *io_sqes_map(struct io_ring_ctx *ctx, unsigned long uaddr,
			 size_t size)
{
	return __io_uaddr_map(&ctx->sqe_pages, &ctx->n_sqe_pages, uaddr,
				size);
}
static void io_rings_free(struct io_ring_ctx *ctx)
{
	if (!(ctx->flags & IORING_SETUP_NO_MMAP)) {
		io_pages_unmap(ctx->rings, &ctx->ring_pages, &ctx->n_ring_pages,
				true);
		io_pages_unmap(ctx->sq_sqes, &ctx->sqe_pages, &ctx->n_sqe_pages,
				true);
	} else {
		io_pages_free(&ctx->ring_pages, ctx->n_ring_pages);
		ctx->n_ring_pages = 0;
		io_pages_free(&ctx->sqe_pages, ctx->n_sqe_pages);
		ctx->n_sqe_pages = 0;
		vunmap(ctx->rings);
		vunmap(ctx->sq_sqes);
	}

	ctx->rings = NULL;
	ctx->sq_sqes = NULL;
}
static unsigned long rings_size(struct io_ring_ctx *ctx, unsigned int sq_entries,
				unsigned int cq_entries, size_t *sq_offset)
{
	struct io_rings *rings;
	size_t off, sq_array_size;

	off = struct_size(rings, cqes, cq_entries);
	if (off == SIZE_MAX)
		return SIZE_MAX;
	if (ctx->flags & IORING_SETUP_CQE32) {
		if (check_shl_overflow(off, 1, &off))
			return SIZE_MAX;
	}

#ifdef CONFIG_SMP
	off = ALIGN(off, SMP_CACHE_BYTES);
	if (off == 0)
		return SIZE_MAX;
#endif

	if (ctx->flags & IORING_SETUP_NO_SQARRAY) {
		*sq_offset = SIZE_MAX;
		return off;
	}

	*sq_offset = off;

	sq_array_size = array_size(sizeof(u32), sq_entries);
	if (sq_array_size == SIZE_MAX)
		return SIZE_MAX;

	if (check_add_overflow(off, sq_array_size, &off))
		return SIZE_MAX;

	return off;
}
static void io_req_caches_free(struct io_ring_ctx *ctx)
{
	struct io_kiocb *req;
	int nr = 0;

	mutex_lock(&ctx->uring_lock);

	while (!io_req_cache_empty(ctx)) {
		req = io_extract_req(ctx);
		kmem_cache_free(req_cachep, req);
		nr++;
	}
	if (nr)
		percpu_ref_put_many(&ctx->refs, nr);
	mutex_unlock(&ctx->uring_lock);
}
static __cold void io_ring_ctx_free(struct io_ring_ctx *ctx)
{
	io_sq_thread_finish(ctx);
	/* __io_rsrc_put_work() may need uring_lock to progress, wait w/o it */
	if (WARN_ON_ONCE(!list_empty(&ctx->rsrc_ref_list)))
		return;

	mutex_lock(&ctx->uring_lock);
	if (ctx->buf_data)
		__io_sqe_buffers_unregister(ctx);
	if (ctx->file_data)
		__io_sqe_files_unregister(ctx);
	io_cqring_overflow_kill(ctx);
	io_eventfd_unregister(ctx);
	io_alloc_cache_free(&ctx->apoll_cache, kfree);
	io_alloc_cache_free(&ctx->netmsg_cache, io_netmsg_cache_free);
	io_alloc_cache_free(&ctx->rw_cache, io_rw_cache_free);
	io_alloc_cache_free(&ctx->uring_cache, kfree);
	io_alloc_cache_free(&ctx->msg_cache, io_msg_cache_free);
	io_futex_cache_free(ctx);
	io_destroy_buffers(ctx);
	mutex_unlock(&ctx->uring_lock);
	if (ctx->sq_creds)
		put_cred(ctx->sq_creds);
	if (ctx->submitter_task)
		put_task_struct(ctx->submitter_task);

	/* there are no registered resources left, nobody uses it */
	if (ctx->rsrc_node)
		io_rsrc_node_destroy(ctx, ctx->rsrc_node);

	WARN_ON_ONCE(!list_empty(&ctx->rsrc_ref_list));
	WARN_ON_ONCE(!list_empty(&ctx->ltimeout_list));

	io_alloc_cache_free(&ctx->rsrc_node_cache, kfree);
	if (ctx->mm_account) {
		mmdrop(ctx->mm_account);
		ctx->mm_account = NULL;
	}
	io_rings_free(ctx);

	percpu_ref_exit(&ctx->refs);
	free_uid(ctx->user);
	io_req_caches_free(ctx);
	if (ctx->hash_map)
		io_wq_put_hash(ctx->hash_map);
	io_napi_free(ctx);
	kfree(ctx->cancel_table.hbs);
	kfree(ctx->cancel_table_locked.hbs);
	xa_destroy(&ctx->io_bl_xa);
	kfree(ctx);
}
static __cold void io_activate_pollwq_cb(struct callback_head *cb)
{
	struct io_ring_ctx *ctx = container_of(cb, struct io_ring_ctx,
					       poll_wq_task_work);

	mutex_lock(&ctx->uring_lock);
	ctx->poll_activated = true;
	mutex_unlock(&ctx->uring_lock);

	/*
	 * Wake ups for some events between start of polling and activation
	 * might've been lost due to loose synchronisation.
	 */
	wake_up_all(&ctx->poll_wq);
	percpu_ref_put(&ctx->refs);
}
__cold void io_activate_pollwq(struct io_ring_ctx *ctx)
{
	spin_lock(&ctx->completion_lock);
	/* already activated or in progress */
	if (ctx->poll_activated || ctx->poll_wq_task_work.func)
		goto out;
	if (WARN_ON_ONCE(!ctx->task_complete))
		goto out;
	if (!ctx->submitter_task)
		goto out;
	/*
	 * with ->submitter_task only the submitter task completes requests, we
	 * only need to sync with it, which is done by injecting a tw
	 */
	init_task_work(&ctx->poll_wq_task_work, io_activate_pollwq_cb);
	percpu_ref_get(&ctx->refs);
	if (task_work_add(ctx->submitter_task, &ctx->poll_wq_task_work, TWA_SIGNAL))
		percpu_ref_put(&ctx->refs);
out:
	spin_unlock(&ctx->completion_lock);
}
static __poll_t io_uring_poll(struct file *file, poll_table *wait)
{
	struct io_ring_ctx *ctx = file->private_data;
	__poll_t mask = 0;

	if (unlikely(!ctx->poll_activated))
		io_activate_pollwq(ctx);

	poll_wait(file, &ctx->poll_wq, wait);
	/*
	 * synchronizes with barrier from wq_has_sleeper call in
	 * io_commit_cqring
	 */
	smp_rmb();
	if (!io_sqring_full(ctx))
		mask |= EPOLLOUT | EPOLLWRNORM;

	/*
	 * Don't flush cqring overflow list here, just do a simple check.
	 * Otherwise there could possible be ABBA deadlock:
	 *      CPU0                    CPU1
	 *      ----                    ----
	 * lock(&ctx->uring_lock);
	 *                              lock(&ep->mtx);
	 *                              lock(&ctx->uring_lock);
	 * lock(&ep->mtx);
	 *
	 * Users may get EPOLLIN meanwhile seeing nothing in cqring, this
	 * pushes them to do the flush.
	 */

	if (__io_cqring_events_user(ctx) || io_has_work(ctx))
		mask |= EPOLLIN | EPOLLRDNORM;

	return mask;
}
struct io_tctx_exit {
	struct callback_head task_work;
	struct completion completion;
	struct io_ring_ctx *ctx;
};
static __cold void io_tctx_exit_cb(struct callback_head *cb)
{
	struct io_uring_task *tctx = current->io_uring;
	struct io_tctx_exit *work;

	work = container_of(cb, struct io_tctx_exit, task_work);
	/*
	 * When @in_cancel, we're in cancellation and it's racy to remove the
	 * node. It'll be removed by the end of cancellation, just ignore it.
	 * tctx can be NULL if the queueing of this task_work raced with
	 * work cancelation off the exec path.
	 */
	if (tctx && !atomic_read(&tctx->in_cancel))
		io_uring_del_tctx_node((unsigned long)work->ctx);
	complete(&work->completion);
}
static __cold bool io_cancel_ctx_cb(struct io_wq_work *work, void *data)
{
	struct io_kiocb *req = container_of(work, struct io_kiocb, work);

	return req->ctx == data;
}
static __cold void io_ring_exit_work(struct work_struct *work)
{
	struct io_ring_ctx *ctx = container_of(work, struct io_ring_ctx, exit_work);
	unsigned long timeout = jiffies + HZ * 60 * 5;
	unsigned long interval = HZ / 20;
	struct io_tctx_exit exit;
	struct io_tctx_node *node;
	int ret;

	/*
	 * If we're doing polled IO and end up having requests being
	 * submitted async (out-of-line), then completions can come in while
	 * we're waiting for refs to drop. We need to reap these manually,
	 * as nobody else will be looking for them.
	 */
	do {
		if (test_bit(IO_CHECK_CQ_OVERFLOW_BIT, &ctx->check_cq)) {
			mutex_lock(&ctx->uring_lock);
			io_cqring_overflow_kill(ctx);
			mutex_unlock(&ctx->uring_lock);
		}

		if (ctx->flags & IORING_SETUP_DEFER_TASKRUN)
			io_move_task_work_from_local(ctx);

		while (io_uring_try_cancel_requests(ctx, NULL, true))
			cond_resched();

		if (ctx->sq_data) {
			struct io_sq_data *sqd = ctx->sq_data;
			struct task_struct *tsk;

			io_sq_thread_park(sqd);
			tsk = sqd->thread;
			if (tsk && tsk->io_uring && tsk->io_uring->io_wq)
				io_wq_cancel_cb(tsk->io_uring->io_wq,
						io_cancel_ctx_cb, ctx, true);
			io_sq_thread_unpark(sqd);
		}

		io_req_caches_free(ctx);

		if (WARN_ON_ONCE(time_after(jiffies, timeout))) {
			/* there is little hope left, don't run it too often */
			interval = HZ * 60;
		}
		/*
		 * This is really an uninterruptible wait, as it has to be
		 * complete. But it's also run from a kworker, which doesn't
		 * take signals, so it's fine to make it interruptible. This
		 * avoids scenarios where we knowingly can wait much longer
		 * on completions, for example if someone does a SIGSTOP on
		 * a task that needs to finish task_work to make this loop
		 * complete. That's a synthetic situation that should not
		 * cause a stuck task backtrace, and hence a potential panic
		 * on stuck tasks if that is enabled.
		 */
	} while (!wait_for_completion_interruptible_timeout(&ctx->ref_comp, interval));

	init_completion(&exit.completion);
	init_task_work(&exit.task_work, io_tctx_exit_cb);
	exit.ctx = ctx;

	mutex_lock(&ctx->uring_lock);
	while (!list_empty(&ctx->tctx_list)) {
		WARN_ON_ONCE(time_after(jiffies, timeout));

		node = list_first_entry(&ctx->tctx_list, struct io_tctx_node,
					ctx_node);
		/* don't spin on a single task if cancellation failed */
		list_rotate_left(&ctx->tctx_list);
		ret = task_work_add(node->task, &exit.task_work, TWA_SIGNAL);
		if (WARN_ON_ONCE(ret))
			continue;

		mutex_unlock(&ctx->uring_lock);
		/*
		 * See comment above for
		 * wait_for_completion_interruptible_timeout() on why this
		 * wait is marked as interruptible.
		 */
		wait_for_completion_interruptible(&exit.completion);
		mutex_lock(&ctx->uring_lock);
	}
	mutex_unlock(&ctx->uring_lock);
	spin_lock(&ctx->completion_lock);
	spin_unlock(&ctx->completion_lock);

	/* pairs with RCU read section in io_req_local_work_add() */
	if (ctx->flags & IORING_SETUP_DEFER_TASKRUN)
		synchronize_rcu();

	io_ring_ctx_free(ctx);
}
static __cold void io_ring_ctx_wait_and_kill(struct io_ring_ctx *ctx)
{
	unsigned long index;
	struct creds *creds;

	mutex_lock(&ctx->uring_lock);
	percpu_ref_kill(&ctx->refs);
	xa_for_each(&ctx->personalities, index, creds)
		io_unregister_personality(ctx, index);
	mutex_unlock(&ctx->uring_lock);

	flush_delayed_work(&ctx->fallback_work);

	INIT_WORK(&ctx->exit_work, io_ring_exit_work);
	/*
	 * Use system_unbound_wq to avoid spawning tons of event kworkers
	 * if we're exiting a ton of rings at the same time. It just adds
	 * noise and overhead, there's no discernable change in runtime
	 * over using system_wq.
	 */
	queue_work(iou_wq, &ctx->exit_work);
}
static int io_uring_release(struct inode *inode, struct file *file)
{
	struct io_ring_ctx *ctx = file->private_data;

	file->private_data = NULL;
	io_ring_ctx_wait_and_kill(ctx);
	return 0;
}
struct io_task_cancel {
	struct task_struct *task;
	bool all;
};
static bool io_cancel_task_cb(struct io_wq_work *work, void *data)
{
	struct io_kiocb *req = container_of(work, struct io_kiocb, work);
	struct io_task_cancel *cancel = data;

	return io_match_task_safe(req, cancel->task, cancel->all);
}
static __cold bool io_cancel_defer_files(struct io_ring_ctx *ctx,
					 struct task_struct *task,
					 bool cancel_all)
{
	struct io_defer_entry *de;
	LIST_HEAD(list);

	spin_lock(&ctx->completion_lock);
	list_for_each_entry_reverse(de, &ctx->defer_list, list) {
		if (io_match_task_safe(de->req, task, cancel_all)) {
			list_cut_position(&list, &ctx->defer_list, &de->list);
			break;
		}
	}
	spin_unlock(&ctx->completion_lock);
	if (list_empty(&list))
		return false;

	while (!list_empty(&list)) {
		de = list_first_entry(&list, struct io_defer_entry, list);
		list_del_init(&de->list);
		io_req_task_queue_fail(de->req, -ECANCELED);
		kfree(de);
	}
	return true;
}
static __cold bool io_uring_try_cancel_iowq(struct io_ring_ctx *ctx)
{
	struct io_tctx_node *node;
	enum io_wq_cancel cret;
	bool ret = false;

	mutex_lock(&ctx->uring_lock);
	list_for_each_entry(node, &ctx->tctx_list, ctx_node) {
		struct io_uring_task *tctx = node->task->io_uring;

		/*
		 * io_wq will stay alive while we hold uring_lock, because it's
		 * killed after ctx nodes, which requires to take the lock.
		 */
		if (!tctx || !tctx->io_wq)
			continue;
		cret = io_wq_cancel_cb(tctx->io_wq, io_cancel_ctx_cb, ctx, true);
		ret |= (cret != IO_WQ_CANCEL_NOTFOUND);
	}
	mutex_unlock(&ctx->uring_lock);

	return ret;
}
static __cold bool io_uring_try_cancel_requests(struct io_ring_ctx *ctx,
						struct task_struct *task,
						bool cancel_all)
{
	struct io_task_cancel cancel = { .task = task, .all = cancel_all, };
	struct io_uring_task *tctx = task ? task->io_uring : NULL;
	enum io_wq_cancel cret;
	bool ret = false;

	/* set it so io_req_local_work_add() would wake us up */
	if (ctx->flags & IORING_SETUP_DEFER_TASKRUN) {
		atomic_set(&ctx->cq_wait_nr, 1);
		smp_mb();
	}

	/* failed during ring init, it couldn't have issued any requests */
	if (!ctx->rings)
		return false;

	if (!task) {
		ret |= io_uring_try_cancel_iowq(ctx);
	} else if (tctx && tctx->io_wq) {
		/*
		 * Cancels requests of all rings, not only @ctx, but
		 * it's fine as the task is in exit/exec.
		 */
		cret = io_wq_cancel_cb(tctx->io_wq, io_cancel_task_cb,
				       &cancel, true);
		ret |= (cret != IO_WQ_CANCEL_NOTFOUND);
	}

	/* SQPOLL thread does its own polling */
	if ((!(ctx->flags & IORING_SETUP_SQPOLL) && cancel_all) ||
	    (ctx->sq_data && ctx->sq_data->thread == current)) {
		while (!wq_list_empty(&ctx->iopoll_list)) {
			io_iopoll_try_reap_events(ctx);
			ret = true;
			cond_resched();
		}
	}

	if ((ctx->flags & IORING_SETUP_DEFER_TASKRUN) &&
	    io_allowed_defer_tw_run(ctx))
		ret |= io_run_local_work(ctx, INT_MAX) > 0;
	ret |= io_cancel_defer_files(ctx, task, cancel_all);
	mutex_lock(&ctx->uring_lock);
	ret |= io_poll_remove_all(ctx, task, cancel_all);
	ret |= io_waitid_remove_all(ctx, task, cancel_all);
	ret |= io_futex_remove_all(ctx, task, cancel_all);
	ret |= io_uring_try_cancel_uring_cmd(ctx, task, cancel_all);
	mutex_unlock(&ctx->uring_lock);
	ret |= io_kill_timeouts(ctx, task, cancel_all);
	if (task)
		ret |= io_run_task_work() > 0;
	else
		ret |= flush_delayed_work(&ctx->fallback_work);
	return ret;
}
static s64 tctx_inflight(struct io_uring_task *tctx, bool tracked)
{
	if (tracked)
		return atomic_read(&tctx->inflight_tracked);
	return percpu_counter_sum(&tctx->inflight);
}
/*
 * Find any io_uring ctx that this task has registered or done IO on, and cancel
 * requests. @sqd should be not-null IFF it's an SQPOLL thread cancellation.
 */
__cold void io_uring_cancel_generic(bool cancel_all, struct io_sq_data *sqd)
{
	struct io_uring_task *tctx = current->io_uring;
	struct io_ring_ctx *ctx;
	struct io_tctx_node *node;
	unsigned long index;
	s64 inflight;
	DEFINE_WAIT(wait);

	WARN_ON_ONCE(sqd && sqd->thread != current);

	if (!current->io_uring)
		return;
	if (tctx->io_wq)
		io_wq_exit_start(tctx->io_wq);

	atomic_inc(&tctx->in_cancel);
	do {
		bool loop = false;

		io_uring_drop_tctx_refs(current);
		if (!tctx_inflight(tctx, !cancel_all))
			break;

		/* read completions before cancelations */
		inflight = tctx_inflight(tctx, false);
		if (!inflight)
			break;

		if (!sqd) {
			xa_for_each(&tctx->xa, index, node) {
				/* sqpoll task will cancel all its requests */
				if (node->ctx->sq_data)
					continue;
				loop |= io_uring_try_cancel_requests(node->ctx,
							current, cancel_all);
			}
		} else {
			list_for_each_entry(ctx, &sqd->ctx_list, sqd_list)
				loop |= io_uring_try_cancel_requests(ctx,
								     current,
								     cancel_all);
		}

		if (loop) {
			cond_resched();
			continue;
		}

		prepare_to_wait(&tctx->wait, &wait, TASK_INTERRUPTIBLE);
		io_run_task_work();
		io_uring_drop_tctx_refs(current);
		xa_for_each(&tctx->xa, index, node) {
			if (!llist_empty(&node->ctx->work_llist)) {
				WARN_ON_ONCE(node->ctx->submitter_task &&
					     node->ctx->submitter_task != current);
				goto end_wait;
			}
		}
		/*
		 * If we've seen completions, retry without waiting. This
		 * avoids a race where a completion comes in before we did
		 * prepare_to_wait().
		 */
		if (inflight == tctx_inflight(tctx, !cancel_all))
			schedule();
end_wait:
		finish_wait(&tctx->wait, &wait);
	} while (1);

	io_uring_clean_tctx(tctx);
	if (cancel_all) {
		/*
		 * We shouldn't run task_works after cancel, so just leave
		 * ->in_cancel set for normal exit.
		 */
		atomic_dec(&tctx->in_cancel);
		/* for exec all current's requests should be gone, kill tctx */
		__io_uring_free(current);
	}
}
void __io_uring_cancel(bool cancel_all)
{
	io_uring_cancel_generic(cancel_all, NULL);
}
static int io_validate_ext_arg(unsigned flags, const void __user *argp, size_t argsz)
{
	if (flags & IORING_ENTER_EXT_ARG) {
		struct io_uring_getevents_arg arg;

		if (argsz != sizeof(arg))
			return -EINVAL;
		if (copy_from_user(&arg, argp, sizeof(arg)))
			return -EFAULT;
	}
	return 0;
}
static int io_get_ext_arg(unsigned flags, const void __user *argp,
			  struct ext_arg *ext_arg)
{
	struct io_uring_getevents_arg arg;

	/*
	 * If EXT_ARG isn't set, then we have no timespec and the argp pointer
	 * is just a pointer to the sigset_t.
	 */
	if (!(flags & IORING_ENTER_EXT_ARG)) {
		ext_arg->sig = (const sigset_t __user *) argp;
		ext_arg->ts = NULL;
		return 0;
	}

	/*
	 * EXT_ARG is set - ensure we agree on the size of it and copy in our
	 * timespec and sigset_t pointers if good.
	 */
	if (ext_arg->argsz != sizeof(arg))
		return -EINVAL;
	if (copy_from_user(&arg, argp, sizeof(arg)))
		return -EFAULT;
	ext_arg->min_time = arg.min_wait_usec * NSEC_PER_USEC;
	ext_arg->sig = u64_to_user_ptr(arg.sigmask);
	ext_arg->argsz = arg.sigmask_sz;
	ext_arg->ts = u64_to_user_ptr(arg.ts);
	return 0;
}
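/*
 * Example (userspace sketch, not part of this kernel file): what the
 * application passes when IORING_ENTER_EXT_ARG is set, matching the layout
 * io_get_ext_arg() copies in above. The 10ms/1ms values are illustrative
 * only, and the helper assumes the min_wait_usec field is available in the
 * installed UAPI header.
 */
#if 0
#include <signal.h>
#include <stdint.h>
#include <string.h>
#include <linux/io_uring.h>
#include <linux/time_types.h>

static void example_fill_getevents_arg(struct io_uring_getevents_arg *arg,
					struct __kernel_timespec *ts,
					const sigset_t *sigmask,
					size_t sigmask_sz)
{
	ts->tv_sec = 0;
	ts->tv_nsec = 10 * 1000 * 1000;			/* overall 10ms timeout */

	memset(arg, 0, sizeof(*arg));
	arg->sigmask = (uint64_t)(uintptr_t)sigmask;	/* may be 0 for "no mask" */
	arg->sigmask_sz = sigmask_sz;
	arg->min_wait_usec = 1000;			/* 1ms min-wait batching window */
	arg->ts = (uint64_t)(uintptr_t)ts;
}
#endif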
SYSCALL_DEFINE6(io_uring_enter, unsigned int, fd, u32, to_submit,
		u32, min_complete, u32, flags, const void __user *, argp,
		size_t, argsz)
{
	struct io_ring_ctx *ctx;
	struct file *file;
	long ret;

	if (unlikely(flags & ~(IORING_ENTER_GETEVENTS | IORING_ENTER_SQ_WAKEUP |
			       IORING_ENTER_SQ_WAIT | IORING_ENTER_EXT_ARG |
			       IORING_ENTER_REGISTERED_RING |
			       IORING_ENTER_ABS_TIMER)))
		return -EINVAL;

	/*
	 * Ring fd has been registered via IORING_REGISTER_RING_FDS, we
	 * need only dereference our task private array to find it.
	 */
	if (flags & IORING_ENTER_REGISTERED_RING) {
		struct io_uring_task *tctx = current->io_uring;

		if (unlikely(!tctx || fd >= IO_RINGFD_REG_MAX))
			return -EINVAL;
		fd = array_index_nospec(fd, IO_RINGFD_REG_MAX);
		file = tctx->registered_rings[fd];
		if (unlikely(!file))
			return -EBADF;
	} else {
		file = fget(fd);
		if (unlikely(!file))
			return -EBADF;
		ret = -EOPNOTSUPP;
		if (unlikely(!io_is_uring_fops(file)))
			goto out;
	}

	ctx = file->private_data;
	ret = -EBADFD;
	if (unlikely(ctx->flags & IORING_SETUP_R_DISABLED))
		goto out;

	/*
	 * For SQ polling, the thread will do all submissions and completions.
	 * Just return the requested submit count, and wake the thread if
	 * we were asked to.
	 */
	ret = 0;
	if (ctx->flags & IORING_SETUP_SQPOLL) {
		if (unlikely(ctx->sq_data->thread == NULL)) {
			ret = -EOWNERDEAD;
			goto out;
		}
		if (flags & IORING_ENTER_SQ_WAKEUP)
			wake_up(&ctx->sq_data->wait);
		if (flags & IORING_ENTER_SQ_WAIT)
			io_sqpoll_wait_sq(ctx);

		ret = to_submit;
	} else if (to_submit) {
		ret = io_uring_add_tctx_node(ctx);
		if (unlikely(ret))
			goto out;

		mutex_lock(&ctx->uring_lock);
		ret = io_submit_sqes(ctx, to_submit);
		if (ret != to_submit) {
			mutex_unlock(&ctx->uring_lock);
			goto out;
		}
		if (flags & IORING_ENTER_GETEVENTS) {
			if (ctx->syscall_iopoll)
				goto iopoll_locked;
			/*
			 * Ignore errors, we'll soon call io_cqring_wait() and
			 * it should handle ownership problems if any.
			 */
			if (ctx->flags & IORING_SETUP_DEFER_TASKRUN)
				(void)io_run_local_work_locked(ctx, min_complete);
		}
		mutex_unlock(&ctx->uring_lock);
	}

	if (flags & IORING_ENTER_GETEVENTS) {
		int ret2;

		if (ctx->syscall_iopoll) {
			/*
			 * We disallow the app entering submit/complete with
			 * polling, but we still need to lock the ring to
			 * prevent racing with polled issue that got punted to
			 * a workqueue.
			 */
			mutex_lock(&ctx->uring_lock);
iopoll_locked:
			ret2 = io_validate_ext_arg(flags, argp, argsz);
			if (likely(!ret2)) {
				min_complete = min(min_complete,
						   ctx->cq_entries);
				ret2 = io_iopoll_check(ctx, min_complete);
			}
			mutex_unlock(&ctx->uring_lock);
		} else {
			struct ext_arg ext_arg = { .argsz = argsz };

			ret2 = io_get_ext_arg(flags, argp, &ext_arg);
			if (likely(!ret2)) {
				min_complete = min(min_complete,
						   ctx->cq_entries);
				ret2 = io_cqring_wait(ctx, min_complete, flags,
						      &ext_arg);
			}
		}

		if (!ret) {
			ret = ret2;

			/*
			 * EBADR indicates that one or more CQE were dropped.
			 * Once the user has been informed we can clear the bit
			 * as they are obviously ok with those drops.
			 */
			if (unlikely(ret2 == -EBADR))
				clear_bit(IO_CHECK_CQ_DROPPED_BIT,
					  &ctx->check_cq);
		}
	}
out:
	if (!(flags & IORING_ENTER_REGISTERED_RING))
		fput(file);
	return ret;
}
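/*
 * Example (userspace sketch, not part of this kernel file): a minimal raw
 * wrapper for the syscall defined above, for systems without liburing.
 * Assumes __NR_io_uring_enter is provided by <sys/syscall.h>.
 */
#if 0
#include <signal.h>
#include <sys/syscall.h>
#include <unistd.h>

static int example_io_uring_enter(int ring_fd, unsigned to_submit,
				  unsigned min_complete, unsigned flags,
				  const sigset_t *sig)
{
	/* sig may be NULL; the sigset size is what the kernel expects (_NSIG / 8) */
	return (int)syscall(__NR_io_uring_enter, ring_fd, to_submit,
			    min_complete, flags, sig, _NSIG / 8);
}
#endif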
static const struct file_operations io_uring_fops = {
	.release	= io_uring_release,
	.mmap		= io_uring_mmap,
	.get_unmapped_area = io_uring_get_unmapped_area,
#ifndef CONFIG_MMU
	.mmap_capabilities = io_uring_nommu_mmap_capabilities,
#endif
	.poll		= io_uring_poll,
#ifdef CONFIG_PROC_FS
	.show_fdinfo	= io_uring_show_fdinfo,
#endif
};

bool io_is_uring_fops(struct file *file)
{
	return file->f_op == &io_uring_fops;
}
static __cold int io_allocate_scq_urings(struct io_ring_ctx *ctx,
					 struct io_uring_params *p)
{
	struct io_rings *rings;
	size_t size, sq_array_offset;
	void *ptr;

	/* make sure these are sane, as we already accounted them */
	ctx->sq_entries = p->sq_entries;
	ctx->cq_entries = p->cq_entries;

	size = rings_size(ctx, p->sq_entries, p->cq_entries, &sq_array_offset);
	if (size == SIZE_MAX)
		return -EOVERFLOW;

	if (!(ctx->flags & IORING_SETUP_NO_MMAP))
		rings = io_pages_map(&ctx->ring_pages, &ctx->n_ring_pages, size);
	else
		rings = io_rings_map(ctx, p->cq_off.user_addr, size);

	if (IS_ERR(rings))
		return PTR_ERR(rings);

	ctx->rings = rings;
	if (!(ctx->flags & IORING_SETUP_NO_SQARRAY))
		ctx->sq_array = (u32 *)((char *)rings + sq_array_offset);
	rings->sq_ring_mask = p->sq_entries - 1;
	rings->cq_ring_mask = p->cq_entries - 1;
	rings->sq_ring_entries = p->sq_entries;
	rings->cq_ring_entries = p->cq_entries;

	if (p->flags & IORING_SETUP_SQE128)
		size = array_size(2 * sizeof(struct io_uring_sqe), p->sq_entries);
	else
		size = array_size(sizeof(struct io_uring_sqe), p->sq_entries);
	if (size == SIZE_MAX) {
		io_rings_free(ctx);
		return -EOVERFLOW;
	}

	if (!(ctx->flags & IORING_SETUP_NO_MMAP))
		ptr = io_pages_map(&ctx->sqe_pages, &ctx->n_sqe_pages, size);
	else
		ptr = io_sqes_map(ctx, p->sq_off.user_addr, size);

	if (IS_ERR(ptr)) {
		io_rings_free(ctx);
		return PTR_ERR(ptr);
	}

	ctx->sq_sqes = ptr;
	return 0;
}
static int io_uring_install_fd(struct file *file)
{
	int fd;

	fd = get_unused_fd_flags(O_RDWR | O_CLOEXEC);
	if (fd < 0)
		return fd;
	fd_install(fd, file);
	return fd;
}
/*
 * Allocate an anonymous fd, this is what constitutes the application
 * visible backing of an io_uring instance. The application mmaps this
 * fd to gain access to the SQ/CQ ring details.
 */
static struct file *io_uring_get_file(struct io_ring_ctx *ctx)
{
	/* Create a new inode so that the LSM can block the creation. */
	return anon_inode_create_getfile("[io_uring]", &io_uring_fops, ctx,
					 O_RDWR | O_CLOEXEC, NULL);
}
static __cold int io_uring_create(unsigned entries, struct io_uring_params *p,
				  struct io_uring_params __user *params)
{
	struct io_ring_ctx *ctx;
	struct io_uring_task *tctx;
	struct file *file;
	int ret;

	if (!entries)
		return -EINVAL;
	if (entries > IORING_MAX_ENTRIES) {
		if (!(p->flags & IORING_SETUP_CLAMP))
			return -EINVAL;
		entries = IORING_MAX_ENTRIES;
	}

	if ((p->flags & IORING_SETUP_REGISTERED_FD_ONLY)
	    && !(p->flags & IORING_SETUP_NO_MMAP))
		return -EINVAL;

	/*
	 * Use twice as many entries for the CQ ring. It's possible for the
	 * application to drive a higher depth than the size of the SQ ring,
	 * since the sqes are only used at submission time. This allows for
	 * some flexibility in overcommitting a bit. If the application has
	 * set IORING_SETUP_CQSIZE, it will have passed in the desired number
	 * of CQ ring entries manually.
	 */
	p->sq_entries = roundup_pow_of_two(entries);
	if (p->flags & IORING_SETUP_CQSIZE) {
		/*
		 * If IORING_SETUP_CQSIZE is set, we do the same roundup
		 * to a power-of-two, if it isn't already. We do NOT impose
		 * any cq vs sq ring sizing.
		 */
		if (!p->cq_entries)
			return -EINVAL;
		if (p->cq_entries > IORING_MAX_CQ_ENTRIES) {
			if (!(p->flags & IORING_SETUP_CLAMP))
				return -EINVAL;
			p->cq_entries = IORING_MAX_CQ_ENTRIES;
		}
		p->cq_entries = roundup_pow_of_two(p->cq_entries);
		if (p->cq_entries < p->sq_entries)
			return -EINVAL;
	} else {
		p->cq_entries = 2 * p->sq_entries;
	}

	ctx = io_ring_ctx_alloc(p);
	if (!ctx)
		return -ENOMEM;

	ctx->clockid = CLOCK_MONOTONIC;
	ctx->clock_offset = 0;

	if ((ctx->flags & IORING_SETUP_DEFER_TASKRUN) &&
	    !(ctx->flags & IORING_SETUP_IOPOLL) &&
	    !(ctx->flags & IORING_SETUP_SQPOLL))
		ctx->task_complete = true;

	if (ctx->task_complete || (ctx->flags & IORING_SETUP_IOPOLL))
		ctx->lockless_cq = true;

	/*
	 * lazy poll_wq activation relies on ->task_complete for synchronisation
	 * purposes, see io_activate_pollwq()
	 */
	if (!ctx->task_complete)
		ctx->poll_activated = true;

	/*
	 * When SETUP_IOPOLL and SETUP_SQPOLL are both enabled, user
	 * space applications don't need to do io completion events
	 * polling again, they can rely on io_sq_thread to do polling
	 * work, which can reduce cpu usage and uring_lock contention.
	 */
	if (ctx->flags & IORING_SETUP_IOPOLL &&
	    !(ctx->flags & IORING_SETUP_SQPOLL))
		ctx->syscall_iopoll = 1;

	ctx->compat = in_compat_syscall();
	if (!ns_capable_noaudit(&init_user_ns, CAP_IPC_LOCK))
		ctx->user = get_uid(current_user());

	/*
	 * For SQPOLL, we just need a wakeup, always. For !SQPOLL, if
	 * COOP_TASKRUN is set, then IPIs are never needed by the app.
	 */
	ret = -EINVAL;
	if (ctx->flags & IORING_SETUP_SQPOLL) {
		/* IPI related flags don't make sense with SQPOLL */
		if (ctx->flags & (IORING_SETUP_COOP_TASKRUN |
				  IORING_SETUP_TASKRUN_FLAG |
				  IORING_SETUP_DEFER_TASKRUN))
			goto err;
		ctx->notify_method = TWA_SIGNAL_NO_IPI;
	} else if (ctx->flags & IORING_SETUP_COOP_TASKRUN) {
		ctx->notify_method = TWA_SIGNAL_NO_IPI;
	} else {
		if (ctx->flags & IORING_SETUP_TASKRUN_FLAG &&
		    !(ctx->flags & IORING_SETUP_DEFER_TASKRUN))
			goto err;
		ctx->notify_method = TWA_SIGNAL;
	}

	/*
	 * For DEFER_TASKRUN we require the completion task to be the same as the
	 * submission task. This implies that there is only one submitter, so enforce
	 * that.
	 */
	if (ctx->flags & IORING_SETUP_DEFER_TASKRUN &&
	    !(ctx->flags & IORING_SETUP_SINGLE_ISSUER)) {
		goto err;
	}

	/*
	 * This is just grabbed for accounting purposes. When a process exits,
	 * the mm is exited and dropped before the files, hence we need to hang
	 * on to this mm purely for the purposes of being able to unaccount
	 * memory (locked/pinned vm). It's not used for anything else.
	 */
	mmgrab(current->mm);
	ctx->mm_account = current->mm;

	ret = io_allocate_scq_urings(ctx, p);
	if (ret)
		goto err;

	ret = io_sq_offload_create(ctx, p);
	if (ret)
		goto err;

	ret = io_rsrc_init(ctx);
	if (ret)
		goto err;

	p->sq_off.head = offsetof(struct io_rings, sq.head);
	p->sq_off.tail = offsetof(struct io_rings, sq.tail);
	p->sq_off.ring_mask = offsetof(struct io_rings, sq_ring_mask);
	p->sq_off.ring_entries = offsetof(struct io_rings, sq_ring_entries);
	p->sq_off.flags = offsetof(struct io_rings, sq_flags);
	p->sq_off.dropped = offsetof(struct io_rings, sq_dropped);
	if (!(ctx->flags & IORING_SETUP_NO_SQARRAY))
		p->sq_off.array = (char *)ctx->sq_array - (char *)ctx->rings;
	p->sq_off.resv1 = 0;
	if (!(ctx->flags & IORING_SETUP_NO_MMAP))
		p->sq_off.user_addr = 0;

	p->cq_off.head = offsetof(struct io_rings, cq.head);
	p->cq_off.tail = offsetof(struct io_rings, cq.tail);
	p->cq_off.ring_mask = offsetof(struct io_rings, cq_ring_mask);
	p->cq_off.ring_entries = offsetof(struct io_rings, cq_ring_entries);
	p->cq_off.overflow = offsetof(struct io_rings, cq_overflow);
	p->cq_off.cqes = offsetof(struct io_rings, cqes);
	p->cq_off.flags = offsetof(struct io_rings, cq_flags);
	p->cq_off.resv1 = 0;
	if (!(ctx->flags & IORING_SETUP_NO_MMAP))
		p->cq_off.user_addr = 0;

	p->features = IORING_FEAT_SINGLE_MMAP | IORING_FEAT_NODROP |
			IORING_FEAT_SUBMIT_STABLE | IORING_FEAT_RW_CUR_POS |
			IORING_FEAT_CUR_PERSONALITY | IORING_FEAT_FAST_POLL |
			IORING_FEAT_POLL_32BITS | IORING_FEAT_SQPOLL_NONFIXED |
			IORING_FEAT_EXT_ARG | IORING_FEAT_NATIVE_WORKERS |
			IORING_FEAT_RSRC_TAGS | IORING_FEAT_CQE_SKIP |
			IORING_FEAT_LINKED_FILE | IORING_FEAT_REG_REG_RING |
			IORING_FEAT_RECVSEND_BUNDLE | IORING_FEAT_MIN_TIMEOUT;

	if (copy_to_user(params, p, sizeof(*p))) {
		ret = -EFAULT;
		goto err;
	}

	if (ctx->flags & IORING_SETUP_SINGLE_ISSUER
	    && !(ctx->flags & IORING_SETUP_R_DISABLED))
		WRITE_ONCE(ctx->submitter_task, get_task_struct(current));

	file = io_uring_get_file(ctx);
	if (IS_ERR(file)) {
		ret = PTR_ERR(file);
		goto err;
	}

	ret = __io_uring_add_tctx_node(ctx);
	if (ret)
		goto err_fput;
	tctx = current->io_uring;

	/*
	 * Install ring fd as the very last thing, so we don't risk someone
	 * having closed it before we finish setup
	 */
	if (p->flags & IORING_SETUP_REGISTERED_FD_ONLY)
		ret = io_ring_add_registered_file(tctx, file, 0, IO_RINGFD_REG_MAX);
	else
		ret = io_uring_install_fd(file);
	if (ret < 0)
		goto err_fput;

	trace_io_uring_create(ret, ctx, p->sq_entries, p->cq_entries, p->flags);
	return ret;
err:
	io_ring_ctx_wait_and_kill(ctx);
	return ret;
err_fput:
	fput(file);
	return ret;
}
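/*
 * Example (userspace sketch, not part of this kernel file): how an
 * application maps the rings using the offsets published in struct
 * io_uring_params by io_uring_create() above. Error handling is omitted;
 * with IORING_FEAT_SINGLE_MMAP one SQ-ring mmap covers the CQ ring too,
 * but the two-mmap form is shown for clarity.
 */
#if 0
#include <stddef.h>
#include <sys/mman.h>
#include <linux/io_uring.h>

struct example_ring_maps {
	void *sq_ring;
	void *cq_ring;
	struct io_uring_sqe *sqes;
};

static void example_map_rings(int ring_fd, const struct io_uring_params *p,
			      struct example_ring_maps *m)
{
	size_t sq_sz = p->sq_off.array + p->sq_entries * sizeof(unsigned);
	size_t cq_sz = p->cq_off.cqes + p->cq_entries * sizeof(struct io_uring_cqe);

	m->sq_ring = mmap(NULL, sq_sz, PROT_READ | PROT_WRITE,
			  MAP_SHARED | MAP_POPULATE, ring_fd, IORING_OFF_SQ_RING);
	m->cq_ring = mmap(NULL, cq_sz, PROT_READ | PROT_WRITE,
			  MAP_SHARED | MAP_POPULATE, ring_fd, IORING_OFF_CQ_RING);
	m->sqes = mmap(NULL, p->sq_entries * sizeof(struct io_uring_sqe),
		       PROT_READ | PROT_WRITE, MAP_SHARED | MAP_POPULATE,
		       ring_fd, IORING_OFF_SQES);
}
#endif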
/*
 * Sets up an io_uring context, and returns the fd. The application asks for
 * a ring size; we return the actual sq/cq ring sizes (among other things) in
 * the params structure passed in.
 */
static long io_uring_setup(u32 entries, struct io_uring_params __user *params)
{
	struct io_uring_params p;
	int i;

	if (copy_from_user(&p, params, sizeof(p)))
		return -EFAULT;
	for (i = 0; i < ARRAY_SIZE(p.resv); i++) {
		if (p.resv[i])
			return -EINVAL;
	}

	if (p.flags & ~(IORING_SETUP_IOPOLL | IORING_SETUP_SQPOLL |
			IORING_SETUP_SQ_AFF | IORING_SETUP_CQSIZE |
			IORING_SETUP_CLAMP | IORING_SETUP_ATTACH_WQ |
			IORING_SETUP_R_DISABLED | IORING_SETUP_SUBMIT_ALL |
			IORING_SETUP_COOP_TASKRUN | IORING_SETUP_TASKRUN_FLAG |
			IORING_SETUP_SQE128 | IORING_SETUP_CQE32 |
			IORING_SETUP_SINGLE_ISSUER | IORING_SETUP_DEFER_TASKRUN |
			IORING_SETUP_NO_MMAP | IORING_SETUP_REGISTERED_FD_ONLY |
			IORING_SETUP_NO_SQARRAY))
		return -EINVAL;

	return io_uring_create(entries, &p, params);
}
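/*
 * Example (userspace sketch, not part of this kernel file): invoking the
 * setup path above through the raw syscall, without liburing. The entry
 * count of 64 in the usage note is an arbitrary illustration value.
 */
#if 0
#include <string.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <linux/io_uring.h>

static int example_io_uring_setup(unsigned entries, struct io_uring_params *p)
{
	memset(p, 0, sizeof(*p));	/* resv[] must be zero or setup fails */
	return (int)syscall(__NR_io_uring_setup, entries, p);
}

/* usage: struct io_uring_params p; int fd = example_io_uring_setup(64, &p); */
#endif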
static inline bool io_uring_allowed(void)
{
	int disabled = READ_ONCE(sysctl_io_uring_disabled);
	kgid_t io_uring_group;

	if (disabled == 2)
		return false;

	if (disabled == 0 || capable(CAP_SYS_ADMIN))
		return true;

	io_uring_group = make_kgid(&init_user_ns, sysctl_io_uring_group);
	if (!gid_valid(io_uring_group))
		return false;

	return in_group_p(io_uring_group);
}
SYSCALL_DEFINE2(io_uring_setup, u32, entries,
		struct io_uring_params __user *, params)
{
	if (!io_uring_allowed())
		return -EPERM;

	return io_uring_setup(entries, params);
}
static int __init io_uring_init(void)
{
	struct kmem_cache_args kmem_args = {
		.useroffset = offsetof(struct io_kiocb, cmd.data),
		.usersize = sizeof_field(struct io_kiocb, cmd.data),
	};

#define __BUILD_BUG_VERIFY_OFFSET_SIZE(stype, eoffset, esize, ename) do { \
	BUILD_BUG_ON(offsetof(stype, ename) != eoffset); \
	BUILD_BUG_ON(sizeof_field(stype, ename) != esize); \
} while (0)

#define BUILD_BUG_SQE_ELEM(eoffset, etype, ename) \
	__BUILD_BUG_VERIFY_OFFSET_SIZE(struct io_uring_sqe, eoffset, sizeof(etype), ename)
#define BUILD_BUG_SQE_ELEM_SIZE(eoffset, esize, ename) \
	__BUILD_BUG_VERIFY_OFFSET_SIZE(struct io_uring_sqe, eoffset, esize, ename)
	BUILD_BUG_ON(sizeof(struct io_uring_sqe) != 64);
	BUILD_BUG_SQE_ELEM(0,  __u8,   opcode);
	BUILD_BUG_SQE_ELEM(1,  __u8,   flags);
	BUILD_BUG_SQE_ELEM(2,  __u16,  ioprio);
	BUILD_BUG_SQE_ELEM(4,  __s32,  fd);
	BUILD_BUG_SQE_ELEM(8,  __u64,  off);
	BUILD_BUG_SQE_ELEM(8,  __u64,  addr2);
	BUILD_BUG_SQE_ELEM(8,  __u32,  cmd_op);
	BUILD_BUG_SQE_ELEM(12, __u32,  __pad1);
	BUILD_BUG_SQE_ELEM(16, __u64,  addr);
	BUILD_BUG_SQE_ELEM(16, __u64,  splice_off_in);
	BUILD_BUG_SQE_ELEM(24, __u32,  len);
	BUILD_BUG_SQE_ELEM(28,     __kernel_rwf_t, rw_flags);
	BUILD_BUG_SQE_ELEM(28, /* compat */   int, rw_flags);
	BUILD_BUG_SQE_ELEM(28, /* compat */ __u32, rw_flags);
	BUILD_BUG_SQE_ELEM(28, __u32,  fsync_flags);
	BUILD_BUG_SQE_ELEM(28, /* compat */ __u16,  poll_events);
	BUILD_BUG_SQE_ELEM(28, __u32,  poll32_events);
	BUILD_BUG_SQE_ELEM(28, __u32,  sync_range_flags);
	BUILD_BUG_SQE_ELEM(28, __u32,  msg_flags);
	BUILD_BUG_SQE_ELEM(28, __u32,  timeout_flags);
	BUILD_BUG_SQE_ELEM(28, __u32,  accept_flags);
	BUILD_BUG_SQE_ELEM(28, __u32,  cancel_flags);
	BUILD_BUG_SQE_ELEM(28, __u32,  open_flags);
	BUILD_BUG_SQE_ELEM(28, __u32,  statx_flags);
	BUILD_BUG_SQE_ELEM(28, __u32,  fadvise_advice);
	BUILD_BUG_SQE_ELEM(28, __u32,  splice_flags);
	BUILD_BUG_SQE_ELEM(28, __u32,  rename_flags);
	BUILD_BUG_SQE_ELEM(28, __u32,  unlink_flags);
	BUILD_BUG_SQE_ELEM(28, __u32,  hardlink_flags);
	BUILD_BUG_SQE_ELEM(28, __u32,  xattr_flags);
	BUILD_BUG_SQE_ELEM(28, __u32,  msg_ring_flags);
	BUILD_BUG_SQE_ELEM(32, __u64,  user_data);
	BUILD_BUG_SQE_ELEM(40, __u16,  buf_index);
	BUILD_BUG_SQE_ELEM(40, __u16,  buf_group);
	BUILD_BUG_SQE_ELEM(42, __u16,  personality);
	BUILD_BUG_SQE_ELEM(44, __s32,  splice_fd_in);
	BUILD_BUG_SQE_ELEM(44, __u32,  file_index);
	BUILD_BUG_SQE_ELEM(44, __u16,  addr_len);
	BUILD_BUG_SQE_ELEM(46, __u16,  __pad3[0]);
	BUILD_BUG_SQE_ELEM(48, __u64,  addr3);
	BUILD_BUG_SQE_ELEM_SIZE(48, 0, cmd);
	BUILD_BUG_SQE_ELEM(56, __u64,  __pad2);

	BUILD_BUG_ON(sizeof(struct io_uring_files_update) !=
		     sizeof(struct io_uring_rsrc_update));
	BUILD_BUG_ON(sizeof(struct io_uring_rsrc_update) >
		     sizeof(struct io_uring_rsrc_update2));

	/* ->buf_index is u16 */
	BUILD_BUG_ON(offsetof(struct io_uring_buf_ring, bufs) != 0);
	BUILD_BUG_ON(offsetof(struct io_uring_buf, resv) !=
		     offsetof(struct io_uring_buf_ring, tail));

	/* should fit into one byte */
	BUILD_BUG_ON(SQE_VALID_FLAGS >= (1 << 8));
	BUILD_BUG_ON(SQE_COMMON_FLAGS >= (1 << 8));
	BUILD_BUG_ON((SQE_VALID_FLAGS | SQE_COMMON_FLAGS) != SQE_VALID_FLAGS);

	BUILD_BUG_ON(__REQ_F_LAST_BIT > 8 * sizeof_field(struct io_kiocb, flags));

	BUILD_BUG_ON(sizeof(atomic_t) != sizeof(u32));

	/* top 8bits are for internal use */
	BUILD_BUG_ON((IORING_URING_CMD_MASK & 0xff000000) != 0);

	io_uring_optable_init();

	/*
	 * Allow user copy in the per-command field, which starts after the
	 * file in io_kiocb and until the opcode field. The openat2 handling
	 * requires copying in user memory into the io_kiocb object in that
	 * range, and HARDENED_USERCOPY will complain if we haven't
	 * correctly annotated this range.
	 */
	req_cachep = kmem_cache_create("io_kiocb", sizeof(struct io_kiocb), &kmem_args,
				SLAB_HWCACHE_ALIGN | SLAB_PANIC | SLAB_ACCOUNT |
				SLAB_TYPESAFE_BY_RCU);
	io_buf_cachep = KMEM_CACHE(io_buffer,
					  SLAB_HWCACHE_ALIGN | SLAB_PANIC | SLAB_ACCOUNT);

	iou_wq = alloc_workqueue("iou_exit", WQ_UNBOUND, 64);

#ifdef CONFIG_SYSCTL
	register_sysctl_init("kernel", kernel_io_uring_disabled_table);
#endif

	return 0;
}
__initcall(io_uring_init);