// SPDX-License-Identifier: GPL-2.0
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/file.h>
#include <linux/io_uring/cmd.h>
#include <linux/io_uring/net.h>
#include <linux/security.h>
#include <linux/nospec.h>
#include <net/sock.h>

#include <uapi/linux/io_uring.h>
#include <asm/ioctls.h>

#include "io_uring.h"
#include "alloc_cache.h"
#include "rsrc.h"
#include "uring_cmd.h"

static struct uring_cache *io_uring_async_get(struct io_kiocb *req)
{
	struct io_ring_ctx *ctx = req->ctx;
	struct uring_cache *cache;

	cache = io_alloc_cache_get(&ctx->uring_cache);
	if (cache) {
		req->flags |= REQ_F_ASYNC_DATA;
		req->async_data = cache;
		return cache;
	}
	if (!io_alloc_async_data(req))
		return req->async_data;
	return NULL;
}

static void io_req_uring_cleanup(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_uring_cmd *ioucmd = io_kiocb_to_cmd(req, struct io_uring_cmd);
	struct uring_cache *cache = req->async_data;

	if (issue_flags & IO_URING_F_UNLOCKED)
		return;
	if (io_alloc_cache_put(&req->ctx->uring_cache, cache)) {
		ioucmd->sqe = NULL;
		req->async_data = NULL;
		req->flags &= ~REQ_F_ASYNC_DATA;
	}
}

bool io_uring_try_cancel_uring_cmd(struct io_ring_ctx *ctx,
				   struct io_uring_task *tctx, bool cancel_all)
{
	struct hlist_node *tmp;
	struct io_kiocb *req;
	bool ret = false;

	lockdep_assert_held(&ctx->uring_lock);

	hlist_for_each_entry_safe(req, tmp, &ctx->cancelable_uring_cmd,
			hash_node) {
		struct io_uring_cmd *cmd = io_kiocb_to_cmd(req,
				struct io_uring_cmd);
		struct file *file = req->file;

		if (!cancel_all && req->tctx != tctx)
			continue;

		if (cmd->flags & IORING_URING_CMD_CANCELABLE) {
			/* ->sqe isn't available if no async data */
			if (!req_has_async_data(req))
				cmd->sqe = NULL;
			file->f_op->uring_cmd(cmd, IO_URING_F_CANCEL |
						   IO_URING_F_COMPLETE_DEFER);
			ret = true;
		}
	}
	io_submit_flush_completions(ctx);
	return ret;
}

static void io_uring_cmd_del_cancelable(struct io_uring_cmd *cmd,
		unsigned int issue_flags)
{
	struct io_kiocb *req = cmd_to_io_kiocb(cmd);
	struct io_ring_ctx *ctx = req->ctx;

	if (!(cmd->flags & IORING_URING_CMD_CANCELABLE))
		return;

	cmd->flags &= ~IORING_URING_CMD_CANCELABLE;
	io_ring_submit_lock(ctx, issue_flags);
	hlist_del(&req->hash_node);
	io_ring_submit_unlock(ctx, issue_flags);
}

/*
 * Mark this command as cancelable, then io_uring_try_cancel_uring_cmd()
 * will try to cancel this issued command by sending ->uring_cmd() with
 * issue_flags of IO_URING_F_CANCEL.
 *
 * The command is guaranteed to not be done when calling ->uring_cmd()
 * with IO_URING_F_CANCEL, but it is the driver's responsibility to deal
 * with the race between io_uring cancellation and normal completion.
 */
void io_uring_cmd_mark_cancelable(struct io_uring_cmd *cmd,
		unsigned int issue_flags)
{
	struct io_kiocb *req = cmd_to_io_kiocb(cmd);
	struct io_ring_ctx *ctx = req->ctx;

	if (!(cmd->flags & IORING_URING_CMD_CANCELABLE)) {
		cmd->flags |= IORING_URING_CMD_CANCELABLE;
		io_ring_submit_lock(ctx, issue_flags);
		hlist_add_head(&req->hash_node, &ctx->cancelable_uring_cmd);
		io_ring_submit_unlock(ctx, issue_flags);
	}
}
EXPORT_SYMBOL_GPL(io_uring_cmd_mark_cancelable);

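/*
 * Illustrative sketch (not part of the upstream file): how a driver's
 * ->uring_cmd() handler might cooperate with the cancelation hooks above.
 * The handler name and the "queue to hardware" step are hypothetical; only
 * io_uring_cmd_mark_cancelable(), io_uring_cmd_done() and IO_URING_F_CANCEL
 * are real interfaces used here.
 */
static int __maybe_unused example_driver_uring_cmd(struct io_uring_cmd *cmd,
						   unsigned int issue_flags)
{
	if (issue_flags & IO_URING_F_CANCEL) {
		/* invoked via io_uring_try_cancel_uring_cmd(): finish the request */
		io_uring_cmd_done(cmd, -ECANCELED, 0, issue_flags);
		return 0;
	}

	/* opt in to cancelation before going async */
	io_uring_cmd_mark_cancelable(cmd, issue_flags);

	/* ... queue the command to hardware here (hypothetical) ... */
	return -EIOCBQUEUED;
}
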
static void io_uring_cmd_work(struct io_kiocb *req, struct io_tw_state *ts)
{
	struct io_uring_cmd *ioucmd = io_kiocb_to_cmd(req, struct io_uring_cmd);
	unsigned int flags = IO_URING_F_COMPLETE_DEFER;

	if (current->flags & (PF_EXITING | PF_KTHREAD))
		flags |= IO_URING_F_TASK_DEAD;

	/* task_work executor checks the deferred list completion */
	ioucmd->task_work_cb(ioucmd, flags);
}

void __io_uring_cmd_do_in_task(struct io_uring_cmd *ioucmd,
			void (*task_work_cb)(struct io_uring_cmd *, unsigned),
			unsigned flags)
{
	struct io_kiocb *req = cmd_to_io_kiocb(ioucmd);

	ioucmd->task_work_cb = task_work_cb;
	req->io_task_work.func = io_uring_cmd_work;
	__io_req_task_work_add(req, flags);
}
EXPORT_SYMBOL_GPL(__io_uring_cmd_do_in_task);

static inline void io_req_set_cqe32_extra(struct io_kiocb *req,
					  u64 extra1, u64 extra2)
{
	req->big_cqe.extra1 = extra1;
	req->big_cqe.extra2 = extra2;
}

/*
 * Called by consumers of io_uring_cmd, if they originally returned
 * -EIOCBQUEUED upon receiving the command.
 */
void io_uring_cmd_done(struct io_uring_cmd *ioucmd, ssize_t ret, u64 res2,
		       unsigned issue_flags)
{
	struct io_kiocb *req = cmd_to_io_kiocb(ioucmd);

	io_uring_cmd_del_cancelable(ioucmd, issue_flags);

	if (ret < 0)
		req_set_fail(req);

	io_req_set_res(req, ret, 0);
	if (req->ctx->flags & IORING_SETUP_CQE32)
		io_req_set_cqe32_extra(req, res2, 0);
	io_req_uring_cleanup(req, issue_flags);
	if (req->ctx->flags & IORING_SETUP_IOPOLL) {
		/* order with io_iopoll_req_issued() checking ->iopoll_completed */
		smp_store_release(&req->iopoll_completed, 1);
	} else if (issue_flags & IO_URING_F_COMPLETE_DEFER) {
		if (WARN_ON_ONCE(issue_flags & IO_URING_F_UNLOCKED))
			return;
		io_req_complete_defer(req);
	} else {
		req->io_task_work.func = io_req_task_complete;
		io_req_task_work_add(req);
	}
}
EXPORT_SYMBOL_GPL(io_uring_cmd_done);

static int io_uring_cmd_prep_setup(struct io_kiocb *req,
				   const struct io_uring_sqe *sqe)
{
	struct io_uring_cmd *ioucmd = io_kiocb_to_cmd(req, struct io_uring_cmd);
	struct uring_cache *cache;

	cache = io_uring_async_get(req);
	if (unlikely(!cache))
		return -ENOMEM;

	if (!(req->flags & REQ_F_FORCE_ASYNC)) {
		/* defer memcpy until we need it */
		ioucmd->sqe = sqe;
		return 0;
	}

	memcpy(req->async_data, sqe, uring_sqe_size(req->ctx));
	ioucmd->sqe = req->async_data;
	return 0;
}

int io_uring_cmd_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_uring_cmd *ioucmd = io_kiocb_to_cmd(req, struct io_uring_cmd);

	if (sqe->__pad1)
		return -EINVAL;

	ioucmd->flags = READ_ONCE(sqe->uring_cmd_flags);
	if (ioucmd->flags & ~IORING_URING_CMD_MASK)
		return -EINVAL;

	if (ioucmd->flags & IORING_URING_CMD_FIXED) {
		struct io_ring_ctx *ctx = req->ctx;
		struct io_rsrc_node *node;
		u16 index = READ_ONCE(sqe->buf_index);

		node = io_rsrc_node_lookup(&ctx->buf_table, index);
		if (unlikely(!node))
			return -EFAULT;
		/*
		 * Pin node upfront, prior to io_uring_cmd_import_fixed()
		 * being called. This prevents destruction of the mapped buffer
		 * we'll need at actual import time.
		 */
		io_req_assign_buf_node(req, node);
	}
	ioucmd->cmd_op = READ_ONCE(sqe->cmd_op);

	return io_uring_cmd_prep_setup(req, sqe);
}

int io_uring_cmd(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_uring_cmd *ioucmd = io_kiocb_to_cmd(req, struct io_uring_cmd);
	struct io_ring_ctx *ctx = req->ctx;
	struct file *file = req->file;
	int ret;

	if (!file->f_op->uring_cmd)
		return -EOPNOTSUPP;

	ret = security_uring_cmd(ioucmd);
	if (ret)
		return ret;

	if (ctx->flags & IORING_SETUP_SQE128)
		issue_flags |= IO_URING_F_SQE128;
	if (ctx->flags & IORING_SETUP_CQE32)
		issue_flags |= IO_URING_F_CQE32;
	if (ctx->compat)
		issue_flags |= IO_URING_F_COMPAT;
	if (ctx->flags & IORING_SETUP_IOPOLL) {
		if (!file->f_op->uring_cmd_iopoll)
			return -EOPNOTSUPP;
		issue_flags |= IO_URING_F_IOPOLL;
		req->iopoll_completed = 0;
	}

	ret = file->f_op->uring_cmd(ioucmd, issue_flags);
	if (ret == -EAGAIN) {
		struct uring_cache *cache = req->async_data;

		if (ioucmd->sqe != (void *) cache)
			memcpy(cache, ioucmd->sqe, uring_sqe_size(req->ctx));
		return -EAGAIN;
	} else if (ret == -EIOCBQUEUED) {
		return -EIOCBQUEUED;
	}

	if (ret < 0)
		req_set_fail(req);
	io_req_uring_cleanup(req, issue_flags);
	io_req_set_res(req, ret, 0);
	return IOU_OK;
}

int io_uring_cmd_import_fixed(u64 ubuf, unsigned long len, int rw,
			      struct iov_iter *iter, void *ioucmd)
{
	struct io_kiocb *req = cmd_to_io_kiocb(ioucmd);
	struct io_rsrc_node *node = req->buf_node;

	/* Must have had rsrc_node assigned at prep time */
	if (node)
		return io_import_fixed(rw, iter, node->buf, ubuf, len);

	return -EFAULT;
}
EXPORT_SYMBOL_GPL(io_uring_cmd_import_fixed);

void io_uring_cmd_issue_blocking(struct io_uring_cmd *ioucmd)
{
	struct io_kiocb *req = cmd_to_io_kiocb(ioucmd);

	io_req_queue_iowq(req);
}

static inline int io_uring_cmd_getsockopt(struct socket *sock,
					  struct io_uring_cmd *cmd,
					  unsigned int issue_flags)
{
	bool compat = !!(issue_flags & IO_URING_F_COMPAT);
	int optlen, optname, level, err;
	void __user *optval;

	level = READ_ONCE(cmd->sqe->level);
	if (level != SOL_SOCKET)
		return -EOPNOTSUPP;

	optval = u64_to_user_ptr(READ_ONCE(cmd->sqe->optval));
	optname = READ_ONCE(cmd->sqe->optname);
	optlen = READ_ONCE(cmd->sqe->optlen);

	err = do_sock_getsockopt(sock, compat, level, optname,
				 USER_SOCKPTR(optval),
				 KERNEL_SOCKPTR(&optlen));
	if (err)
		return err;

	/* On success, return optlen */
	return optlen;
}

static inline int io_uring_cmd_setsockopt(struct socket *sock,
					  struct io_uring_cmd *cmd,
					  unsigned int issue_flags)
{
	bool compat = !!(issue_flags & IO_URING_F_COMPAT);
	int optname, optlen, level;
	void __user *optval;
	sockptr_t optval_s;

	optval = u64_to_user_ptr(READ_ONCE(cmd->sqe->optval));
	optname = READ_ONCE(cmd->sqe->optname);
	optlen = READ_ONCE(cmd->sqe->optlen);
	level = READ_ONCE(cmd->sqe->level);
	optval_s = USER_SOCKPTR(optval);

	return do_sock_setsockopt(sock, compat, level, optname, optval_s,
				  optlen);
}

#if defined(CONFIG_NET)
int io_uring_cmd_sock(struct io_uring_cmd *cmd, unsigned int issue_flags)
{
	struct socket *sock = cmd->file->private_data;
	struct sock *sk = sock->sk;
	struct proto *prot = READ_ONCE(sk->sk_prot);
	int ret, arg = 0;

	if (!prot || !prot->ioctl)
		return -EOPNOTSUPP;

	switch (cmd->sqe->cmd_op) {
	case SOCKET_URING_OP_SIOCINQ:
		ret = prot->ioctl(sk, SIOCINQ, &arg);
		if (ret)
			return ret;
		return arg;
	case SOCKET_URING_OP_SIOCOUTQ:
		ret = prot->ioctl(sk, SIOCOUTQ, &arg);
		if (ret)
			return ret;
		return arg;
	case SOCKET_URING_OP_GETSOCKOPT:
		return io_uring_cmd_getsockopt(sock, cmd, issue_flags);
	case SOCKET_URING_OP_SETSOCKOPT:
		return io_uring_cmd_setsockopt(sock, cmd, issue_flags);
	default:
		return -EOPNOTSUPP;
	}
}
EXPORT_SYMBOL_GPL(io_uring_cmd_sock);
#endif