// SPDX-License-Identifier: GPL-2.0
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/file.h>
#include <linux/io_uring/cmd.h>
#include <linux/io_uring/net.h>
#include <linux/security.h>
#include <linux/nospec.h>
#include <net/sock.h>

#include <uapi/linux/io_uring.h>
#include <asm/ioctls.h>

#include "io_uring.h"
#include "alloc_cache.h"
#include "rsrc.h"
#include "uring_cmd.h"

static struct uring_cache *io_uring_async_get(struct io_kiocb *req)
{
	struct io_ring_ctx *ctx = req->ctx;
	struct uring_cache *cache;

	cache = io_alloc_cache_get(&ctx->uring_cache);
	if (cache) {
		req->flags |= REQ_F_ASYNC_DATA;
		req->async_data = cache;
		return cache;
	}
	/* Cache miss: fall back to a regular async data allocation */
	if (!io_alloc_async_data(req))
		return req->async_data;
	return NULL;
}

static void io_req_uring_cleanup(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_uring_cmd *ioucmd = io_kiocb_to_cmd(req, struct io_uring_cmd);
	struct uring_cache *cache = req->async_data;

	if (issue_flags & IO_URING_F_UNLOCKED)
		return;
	if (io_alloc_cache_put(&req->ctx->uring_cache, cache)) {
		ioucmd->sqe = NULL;
		req->async_data = NULL;
		req->flags &= ~REQ_F_ASYNC_DATA;
	}
}

bool io_uring_try_cancel_uring_cmd(struct io_ring_ctx *ctx,
				   struct task_struct *task, bool cancel_all)
{
	struct hlist_node *tmp;
	struct io_kiocb *req;
	bool ret = false;

	lockdep_assert_held(&ctx->uring_lock);

	hlist_for_each_entry_safe(req, tmp, &ctx->cancelable_uring_cmd,
			hash_node) {
		struct io_uring_cmd *cmd = io_kiocb_to_cmd(req,
				struct io_uring_cmd);
		struct file *file = req->file;

		if (!cancel_all && req->task != task)
			continue;

		if (cmd->flags & IORING_URING_CMD_CANCELABLE) {
			/* ->sqe isn't available if no async data */
			if (!req_has_async_data(req))
				cmd->sqe = NULL;
			file->f_op->uring_cmd(cmd, IO_URING_F_CANCEL |
						   IO_URING_F_COMPLETE_DEFER);
			ret = true;
		}
	}
	io_submit_flush_completions(ctx);
	return ret;
}

static void io_uring_cmd_del_cancelable(struct io_uring_cmd *cmd,
		unsigned int issue_flags)
{
	struct io_kiocb *req = cmd_to_io_kiocb(cmd);
	struct io_ring_ctx *ctx = req->ctx;

	if (!(cmd->flags & IORING_URING_CMD_CANCELABLE))
		return;

	cmd->flags &= ~IORING_URING_CMD_CANCELABLE;
	io_ring_submit_lock(ctx, issue_flags);
	hlist_del(&req->hash_node);
	io_ring_submit_unlock(ctx, issue_flags);
}

/*
 * Mark this command as cancelable, then io_uring_try_cancel_uring_cmd()
 * will try to cancel this issued command by sending ->uring_cmd() with
 * issue_flags of IO_URING_F_CANCEL.
 *
 * The command is guaranteed to not be done when calling ->uring_cmd()
 * with IO_URING_F_CANCEL, but it is the driver's responsibility to deal
 * with the race between io_uring cancellation and normal completion.
 */
void io_uring_cmd_mark_cancelable(struct io_uring_cmd *cmd,
		unsigned int issue_flags)
{
	struct io_kiocb *req = cmd_to_io_kiocb(cmd);
	struct io_ring_ctx *ctx = req->ctx;

	if (!(cmd->flags & IORING_URING_CMD_CANCELABLE)) {
		cmd->flags |= IORING_URING_CMD_CANCELABLE;
		io_ring_submit_lock(ctx, issue_flags);
		hlist_add_head(&req->hash_node, &ctx->cancelable_uring_cmd);
		io_ring_submit_unlock(ctx, issue_flags);
	}
}
EXPORT_SYMBOL_GPL(io_uring_cmd_mark_cancelable);
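
/*
 * Illustrative sketch, not part of the original file: how a driver's
 * ->uring_cmd() handler might use io_uring_cmd_mark_cancelable(). The
 * foo_* helpers are hypothetical stand-ins for driver-specific logic;
 * only the io_uring calls, flags and return conventions are taken from
 * this file. A handler that queues the command asynchronously marks it
 * cancelable, and must also cope with being re-issued later with
 * IO_URING_F_CANCEL for the same command.
 *
 *	static int foo_uring_cmd(struct io_uring_cmd *cmd, unsigned int issue_flags)
 *	{
 *		if (issue_flags & IO_URING_F_CANCEL) {
 *			// Best effort: normal completion may race with this
 *			foo_abort_cmd(cmd);
 *			return 0;
 *		}
 *
 *		if (foo_queue_cmd(cmd))
 *			return -EIO;
 *		// Command is now in flight; allow io_uring to cancel it
 *		io_uring_cmd_mark_cancelable(cmd, issue_flags);
 *		return -EIOCBQUEUED;
 *	}
 */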

static void io_uring_cmd_work(struct io_kiocb *req, struct io_tw_state *ts)
{
	struct io_uring_cmd *ioucmd = io_kiocb_to_cmd(req, struct io_uring_cmd);

	/* task_work executor checks the deferred list completion */
	ioucmd->task_work_cb(ioucmd, IO_URING_F_COMPLETE_DEFER);
}

void __io_uring_cmd_do_in_task(struct io_uring_cmd *ioucmd,
			void (*task_work_cb)(struct io_uring_cmd *, unsigned),
			unsigned flags)
{
	struct io_kiocb *req = cmd_to_io_kiocb(ioucmd);

	ioucmd->task_work_cb = task_work_cb;
	req->io_task_work.func = io_uring_cmd_work;
	__io_req_task_work_add(req, flags);
}
EXPORT_SYMBOL_GPL(__io_uring_cmd_do_in_task);
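
/*
 * Illustrative sketch, not part of the original file: completion often
 * arrives in interrupt context, where a driver must not post the CQE
 * directly. io_uring_cmd_complete_in_task(), a header wrapper around
 * __io_uring_cmd_do_in_task(), schedules a callback in task context
 * instead. foo_irq_complete() and foo_cmd_tw_cb() are hypothetical
 * driver functions; the callback is sketched after io_uring_cmd_done().
 *
 *	static void foo_irq_complete(struct io_uring_cmd *cmd)
 *	{
 *		io_uring_cmd_complete_in_task(cmd, foo_cmd_tw_cb);
 *	}
 */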

static inline void io_req_set_cqe32_extra(struct io_kiocb *req,
					  u64 extra1, u64 extra2)
{
	req->big_cqe.extra1 = extra1;
	req->big_cqe.extra2 = extra2;
}

/*
 * Called by consumers of io_uring_cmd, if they originally returned
 * -EIOCBQUEUED upon receiving the command.
 */
void io_uring_cmd_done(struct io_uring_cmd *ioucmd, ssize_t ret, ssize_t res2,
		       unsigned issue_flags)
{
	struct io_kiocb *req = cmd_to_io_kiocb(ioucmd);

	io_uring_cmd_del_cancelable(ioucmd, issue_flags);

	if (ret < 0)
		req_set_fail(req);

	io_req_set_res(req, ret, 0);
	if (req->ctx->flags & IORING_SETUP_CQE32)
		io_req_set_cqe32_extra(req, res2, 0);
	io_req_uring_cleanup(req, issue_flags);
	if (req->ctx->flags & IORING_SETUP_IOPOLL) {
		/* order with io_iopoll_req_issued() checking ->iopoll_completed */
		smp_store_release(&req->iopoll_completed, 1);
	} else if (issue_flags & IO_URING_F_COMPLETE_DEFER) {
		if (WARN_ON_ONCE(issue_flags & IO_URING_F_UNLOCKED))
			return;
		io_req_complete_defer(req);
	} else {
		req->io_task_work.func = io_req_task_complete;
		io_req_task_work_add(req);
	}
}
EXPORT_SYMBOL_GPL(io_uring_cmd_done);
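
/*
 * Illustrative sketch, not part of the original file: the task-work
 * callback for a command that originally returned -EIOCBQUEUED ends by
 * calling io_uring_cmd_done() with the result, plus a second result
 * value that is only consumed on IORING_SETUP_CQE32 rings.
 * foo_cmd_tw_cb() and foo_cmd_result() are hypothetical driver
 * functions.
 *
 *	static void foo_cmd_tw_cb(struct io_uring_cmd *cmd, unsigned int issue_flags)
 *	{
 *		ssize_t res = foo_cmd_result(cmd);
 *
 *		io_uring_cmd_done(cmd, res, 0, issue_flags);
 *	}
 */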

static int io_uring_cmd_prep_setup(struct io_kiocb *req,
				   const struct io_uring_sqe *sqe)
{
	struct io_uring_cmd *ioucmd = io_kiocb_to_cmd(req, struct io_uring_cmd);
	struct uring_cache *cache;

	cache = io_uring_async_get(req);
	if (unlikely(!cache))
		return -ENOMEM;

	if (!(req->flags & REQ_F_FORCE_ASYNC)) {
		/* defer memcpy until we need it */
		ioucmd->sqe = sqe;
		return 0;
	}

	memcpy(req->async_data, sqe, uring_sqe_size(req->ctx));
	ioucmd->sqe = req->async_data;
	return 0;
}

int io_uring_cmd_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_uring_cmd *ioucmd = io_kiocb_to_cmd(req, struct io_uring_cmd);

	ioucmd->flags = READ_ONCE(sqe->uring_cmd_flags);
	if (ioucmd->flags & ~IORING_URING_CMD_MASK)
		return -EINVAL;

	if (ioucmd->flags & IORING_URING_CMD_FIXED) {
		struct io_ring_ctx *ctx = req->ctx;
		u16 index;

		req->buf_index = READ_ONCE(sqe->buf_index);
		if (unlikely(req->buf_index >= ctx->nr_user_bufs))
			return -EFAULT;
		index = array_index_nospec(req->buf_index, ctx->nr_user_bufs);
		req->imu = ctx->user_bufs[index];
		io_req_set_rsrc_node(req, ctx, 0);
	}
	ioucmd->cmd_op = READ_ONCE(sqe->cmd_op);

	return io_uring_cmd_prep_setup(req, sqe);
}

int io_uring_cmd(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_uring_cmd *ioucmd = io_kiocb_to_cmd(req, struct io_uring_cmd);
	struct io_ring_ctx *ctx = req->ctx;
	struct file *file = req->file;
	int ret;

	if (!file->f_op->uring_cmd)
		return -EOPNOTSUPP;

	ret = security_uring_cmd(ioucmd);
	if (ret)
		return ret;

	if (ctx->flags & IORING_SETUP_SQE128)
		issue_flags |= IO_URING_F_SQE128;
	if (ctx->flags & IORING_SETUP_CQE32)
		issue_flags |= IO_URING_F_CQE32;
	if (ctx->compat)
		issue_flags |= IO_URING_F_COMPAT;
	if (ctx->flags & IORING_SETUP_IOPOLL) {
		if (!file->f_op->uring_cmd_iopoll)
			return -EOPNOTSUPP;
		issue_flags |= IO_URING_F_IOPOLL;
		req->iopoll_completed = 0;
	}

	ret = file->f_op->uring_cmd(ioucmd, issue_flags);
	if (ret == -EAGAIN) {
		struct uring_cache *cache = req->async_data;

		/* preserve the SQE contents for the async retry */
		if (ioucmd->sqe != (void *) cache)
			memcpy(cache, ioucmd->sqe, uring_sqe_size(req->ctx));
		return -EAGAIN;
	} else if (ret == -EIOCBQUEUED) {
		return -EIOCBQUEUED;
	}

	if (ret < 0)
		req_set_fail(req);
	io_req_uring_cleanup(req, issue_flags);
	io_req_set_res(req, ret, 0);
	return IOU_OK;
}

int io_uring_cmd_import_fixed(u64 ubuf, unsigned long len, int rw,
			      struct iov_iter *iter, void *ioucmd)
{
	struct io_kiocb *req = cmd_to_io_kiocb(ioucmd);

	return io_import_fixed(rw, iter, req->imu, ubuf, len);
}
EXPORT_SYMBOL_GPL(io_uring_cmd_import_fixed);

void io_uring_cmd_issue_blocking(struct io_uring_cmd *ioucmd)
{
	struct io_kiocb *req = cmd_to_io_kiocb(ioucmd);

	io_req_queue_iowq(req);
}

static inline int io_uring_cmd_getsockopt(struct socket *sock,
					  struct io_uring_cmd *cmd,
					  unsigned int issue_flags)
{
	bool compat = !!(issue_flags & IO_URING_F_COMPAT);
	int optlen, optname, level, err;
	void __user *optval;

	level = READ_ONCE(cmd->sqe->level);
	if (level != SOL_SOCKET)
		return -EOPNOTSUPP;

	optval = u64_to_user_ptr(READ_ONCE(cmd->sqe->optval));
	optname = READ_ONCE(cmd->sqe->optname);
	optlen = READ_ONCE(cmd->sqe->optlen);

	err = do_sock_getsockopt(sock, compat, level, optname,
				 USER_SOCKPTR(optval),
				 KERNEL_SOCKPTR(&optlen));
	if (err)
		return err;

	/* On success, return optlen */
	return optlen;
}

static inline int io_uring_cmd_setsockopt(struct socket *sock,
					  struct io_uring_cmd *cmd,
					  unsigned int issue_flags)
{
	bool compat = !!(issue_flags & IO_URING_F_COMPAT);
	int optname, optlen, level;
	void __user *optval;
	sockptr_t optval_s;

	optval = u64_to_user_ptr(READ_ONCE(cmd->sqe->optval));
	optname = READ_ONCE(cmd->sqe->optname);
	optlen = READ_ONCE(cmd->sqe->optlen);
	level = READ_ONCE(cmd->sqe->level);
	optval_s = USER_SOCKPTR(optval);

	return do_sock_setsockopt(sock, compat, level, optname, optval_s,
				  optlen);
}

#if defined(CONFIG_NET)
int io_uring_cmd_sock(struct io_uring_cmd *cmd, unsigned int issue_flags)
{
	struct socket *sock = cmd->file->private_data;
	struct sock *sk = sock->sk;
	struct proto *prot = READ_ONCE(sk->sk_prot);
	int ret, arg = 0;

	if (!prot || !prot->ioctl)
		return -EOPNOTSUPP;

	switch (cmd->sqe->cmd_op) {
	case SOCKET_URING_OP_SIOCINQ:
		ret = prot->ioctl(sk, SIOCINQ, &arg);
		if (ret)
			return ret;
		return arg;
	case SOCKET_URING_OP_SIOCOUTQ:
		ret = prot->ioctl(sk, SIOCOUTQ, &arg);
		if (ret)
			return ret;
		return arg;
	case SOCKET_URING_OP_GETSOCKOPT:
		return io_uring_cmd_getsockopt(sock, cmd, issue_flags);
	case SOCKET_URING_OP_SETSOCKOPT:
		return io_uring_cmd_setsockopt(sock, cmd, issue_flags);
	default:
		return -EOPNOTSUPP;
	}
}
EXPORT_SYMBOL_GPL(io_uring_cmd_sock);
#endif