// SPDX-License-Identifier: GPL-2.0
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/namei.h>
#include <linux/nospec.h>
#include <linux/io_uring.h>

#include <uapi/linux/io_uring.h>

#include "io_uring.h"
#include "tctx.h"
#include "poll.h"
#include "timeout.h"
#include "waitid.h"
#include "futex.h"
#include "cancel.h"

struct io_cancel {
	struct file			*file;
	u64				addr;
	u32				flags;
	s32				fd;
	u8				opcode;
};
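/*
 * Mask of cancel flags accepted from userspace, either via sqe->cancel_flags
 * for IORING_OP_ASYNC_CANCEL or via io_uring_sync_cancel_reg.flags for the
 * synchronous cancel interface. Anything outside this mask is -EINVAL.
 */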
#define CANCEL_FLAGS	(IORING_ASYNC_CANCEL_ALL | IORING_ASYNC_CANCEL_FD | \
			 IORING_ASYNC_CANCEL_ANY | IORING_ASYNC_CANCEL_FD_FIXED | \
			 IORING_ASYNC_CANCEL_USERDATA | IORING_ASYNC_CANCEL_OP)
/*
 * Returns true if the request matches the criteria outlined by 'cd'.
 */
bool io_cancel_req_match(struct io_kiocb *req, struct io_cancel_data *cd)
{
	bool match_user_data = cd->flags & IORING_ASYNC_CANCEL_USERDATA;

	if (req->ctx != cd->ctx)
		return false;

	if (!(cd->flags & (IORING_ASYNC_CANCEL_FD | IORING_ASYNC_CANCEL_OP)))
		match_user_data = true;

	if (cd->flags & IORING_ASYNC_CANCEL_ANY)
		goto check_seq;
	if (cd->flags & IORING_ASYNC_CANCEL_FD) {
		if (req->file != cd->file)
			return false;
	}
	if (cd->flags & IORING_ASYNC_CANCEL_OP) {
		if (req->opcode != cd->opcode)
			return false;
	}
	if (match_user_data && req->cqe.user_data != cd->data)
		return false;
	if (cd->flags & IORING_ASYNC_CANCEL_ALL) {
check_seq:
		if (io_cancel_match_sequence(req, cd->seq))
			return false;
	}

	return true;
}
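/*
 * Matching callback handed to io_wq_cancel_cb(): 'data' points to the
 * io_cancel_data describing which request(s) should be cancelled.
 */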
static bool io_cancel_cb(struct io_wq_work *work, void *data)
{
	struct io_kiocb *req = container_of(work, struct io_kiocb, work);
	struct io_cancel_data *cd = data;

	return io_cancel_req_match(req, cd);
}
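/*
 * Try to cancel matching work on one task's io-wq. Returns 0 if work was
 * cancelled, -EALREADY if it was already running, or -ENOENT if nothing
 * matched.
 */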
static int io_async_cancel_one(struct io_uring_task *tctx,
			       struct io_cancel_data *cd)
{
	enum io_wq_cancel cancel_ret;
	int ret = 0;
	bool all;

	if (!tctx || !tctx->io_wq)
		return -ENOENT;

	all = cd->flags & (IORING_ASYNC_CANCEL_ALL|IORING_ASYNC_CANCEL_ANY);
	cancel_ret = io_wq_cancel_cb(tctx->io_wq, io_cancel_cb, cd, all);
	switch (cancel_ret) {
	case IO_WQ_CANCEL_OK:
		ret = 0;
		break;
	case IO_WQ_CANCEL_RUNNING:
		ret = -EALREADY;
		break;
	case IO_WQ_CANCEL_NOTFOUND:
		ret = -ENOENT;
		break;
	}

	return ret;
}
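/*
 * Attempt to cancel a request wherever it may be pending: the task's io-wq,
 * poll, waitid and futex state, and finally timeouts (unless cancelling by
 * file descriptor).
 */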
int io_try_cancel(struct io_uring_task *tctx, struct io_cancel_data *cd,
		  unsigned issue_flags)
{
	struct io_ring_ctx *ctx = cd->ctx;
	int ret;

	WARN_ON_ONCE(!io_wq_current_is_worker() && tctx != current->io_uring);

	ret = io_async_cancel_one(tctx, cd);
	/*
	 * Fall-through even for -EALREADY, as we may have a poll request
	 * armed that needs unarming.
	 */
	if (!ret)
		return 0;

	ret = io_poll_cancel(ctx, cd, issue_flags);
	if (ret != -ENOENT)
		return ret;

	ret = io_waitid_cancel(ctx, cd, issue_flags);
	if (ret != -ENOENT)
		return ret;

	ret = io_futex_cancel(ctx, cd, issue_flags);
	if (ret != -ENOENT)
		return ret;

	spin_lock(&ctx->completion_lock);
	if (!(cd->flags & IORING_ASYNC_CANCEL_FD))
		ret = io_timeout_cancel(ctx, cd);
	spin_unlock(&ctx->completion_lock);
	return ret;
}
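/*
 * Prepare IORING_OP_ASYNC_CANCEL: the target user_data comes from sqe->addr,
 * the cancel flags from sqe->cancel_flags, plus an optional fd and opcode
 * depending on the flags set.
 */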
int io_async_cancel_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_cancel *cancel = io_kiocb_to_cmd(req, struct io_cancel);

	if (unlikely(req->flags & REQ_F_BUFFER_SELECT))
		return -EINVAL;
	if (sqe->off || sqe->splice_fd_in)
		return -EINVAL;

	cancel->addr = READ_ONCE(sqe->addr);
	cancel->flags = READ_ONCE(sqe->cancel_flags);
	if (cancel->flags & ~CANCEL_FLAGS)
		return -EINVAL;
	if (cancel->flags & IORING_ASYNC_CANCEL_FD) {
		if (cancel->flags & IORING_ASYNC_CANCEL_ANY)
			return -EINVAL;
		cancel->fd = READ_ONCE(sqe->fd);
	}
	if (cancel->flags & IORING_ASYNC_CANCEL_OP) {
		if (cancel->flags & IORING_ASYNC_CANCEL_ANY)
			return -EINVAL;
		cancel->opcode = READ_ONCE(sqe->len);
	}

	return 0;
}
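/*
 * Core of the async cancel: first try the issuing task's context, then fall
 * back to walking every io-wq attached to the ring. With CANCEL_ALL or
 * CANCEL_ANY the return value is the number of requests cancelled, otherwise
 * it is the result of the first attempt (or -ENOENT).
 */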
static int __io_async_cancel(struct io_cancel_data *cd,
			     struct io_uring_task *tctx,
			     unsigned int issue_flags)
{
	bool all = cd->flags & (IORING_ASYNC_CANCEL_ALL|IORING_ASYNC_CANCEL_ANY);
	struct io_ring_ctx *ctx = cd->ctx;
	struct io_tctx_node *node;
	int ret, nr = 0;

	do {
		ret = io_try_cancel(tctx, cd, issue_flags);
		if (ret == -ENOENT)
			break;
		if (!all)
			return ret;
		nr++;
	} while (1);

	/* slow path, try all io-wq's */
	io_ring_submit_lock(ctx, issue_flags);
	ret = -ENOENT;
	list_for_each_entry(node, &ctx->tctx_list, ctx_node) {
		ret = io_async_cancel_one(node->task->io_uring, cd);
		if (ret != -ENOENT) {
			if (!all)
				break;
			nr++;
		}
	}
	io_ring_submit_unlock(ctx, issue_flags);
	return all ? nr : ret;
}
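/*
 * IORING_OP_ASYNC_CANCEL issue handler: resolve the target file if cancelling
 * by fd (fixed or normal), run the cancel, and post the result in the CQE.
 */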
int io_async_cancel(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_cancel *cancel = io_kiocb_to_cmd(req, struct io_cancel);
	struct io_cancel_data cd = {
		.ctx	= req->ctx,
		.data	= cancel->addr,
		.flags	= cancel->flags,
		.opcode	= cancel->opcode,
		.seq	= atomic_inc_return(&req->ctx->cancel_seq),
	};
	struct io_uring_task *tctx = req->task->io_uring;
	int ret;

	if (cd.flags & IORING_ASYNC_CANCEL_FD) {
		if (req->flags & REQ_F_FIXED_FILE ||
		    cd.flags & IORING_ASYNC_CANCEL_FD_FIXED) {
			req->flags |= REQ_F_FIXED_FILE;
			req->file = io_file_get_fixed(req, cancel->fd,
							issue_flags);
		} else {
			req->file = io_file_get_normal(req, cancel->fd);
		}
		if (!req->file) {
			ret = -EBADF;
			goto done;
		}
		cd.file = req->file;
	}

	ret = __io_async_cancel(&cd, tctx, issue_flags);
done:
	if (ret < 0)
		req_set_fail(req);
	io_req_set_res(req, ret, 0);
	return IOU_OK;
}
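/* Initialize every bucket (lock + hlist head) of a cancelation hash table */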
void init_hash_table(struct io_hash_table *table, unsigned size)
{
	unsigned int i;

	for (i = 0; i < size; i++) {
		spin_lock_init(&table->hbs[i].lock);
		INIT_HLIST_HEAD(&table->hbs[i].list);
	}
}
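/*
 * One pass of a synchronous cancel; a fixed file target is re-resolved on
 * every pass, see below.
 */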
static int __io_sync_cancel(struct io_uring_task *tctx,
			    struct io_cancel_data *cd, int fd)
{
	struct io_ring_ctx *ctx = cd->ctx;

	/* fixed must be grabbed every time since we drop the uring_lock */
	if ((cd->flags & IORING_ASYNC_CANCEL_FD) &&
	    (cd->flags & IORING_ASYNC_CANCEL_FD_FIXED)) {
		if (unlikely(fd >= ctx->nr_user_files))
			return -EBADF;
		fd = array_index_nospec(fd, ctx->nr_user_files);
		cd->file = io_file_from_index(&ctx->file_table, fd);
		if (!cd->file)
			return -EBADF;
	}

	return __io_async_cancel(cd, tctx, 0);
}
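/*
 * Synchronous cancel, reached via io_uring_register(IORING_REGISTER_SYNC_CANCEL).
 * Retries while waiting on cq_wait until the match is gone, an error occurs,
 * or the caller-supplied timeout expires.
 */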
int io_sync_cancel(struct io_ring_ctx *ctx, void __user *arg)
	__must_hold(&ctx->uring_lock)
{
	struct io_cancel_data cd = {
		.ctx	= ctx,
		.seq	= atomic_inc_return(&ctx->cancel_seq),
	};
	ktime_t timeout = KTIME_MAX;
	struct io_uring_sync_cancel_reg sc;
	struct file *file = NULL;
	DEFINE_WAIT(wait);
	int ret, i;

	if (copy_from_user(&sc, arg, sizeof(sc)))
		return -EFAULT;
	if (sc.flags & ~CANCEL_FLAGS)
		return -EINVAL;
	for (i = 0; i < ARRAY_SIZE(sc.pad); i++)
		if (sc.pad[i])
			return -EINVAL;
	for (i = 0; i < ARRAY_SIZE(sc.pad2); i++)
		if (sc.pad2[i])
			return -EINVAL;

	cd.data = sc.addr;
	cd.flags = sc.flags;
	cd.opcode = sc.opcode;

	/* we can grab a normal file descriptor upfront */
	if ((cd.flags & IORING_ASYNC_CANCEL_FD) &&
	   !(cd.flags & IORING_ASYNC_CANCEL_FD_FIXED)) {
		file = fget(sc.fd);
		if (!file)
			return -EBADF;
		cd.file = file;
	}

	ret = __io_sync_cancel(current->io_uring, &cd, sc.fd);

	/* found something, done! */
	if (ret != -EALREADY)
		goto out;

	if (sc.timeout.tv_sec != -1UL || sc.timeout.tv_nsec != -1UL) {
		struct timespec64 ts = {
			.tv_sec		= sc.timeout.tv_sec,
			.tv_nsec	= sc.timeout.tv_nsec
		};

		timeout = ktime_add_ns(timespec64_to_ktime(ts), ktime_get_ns());
	}

	/*
	 * Keep looking until we get -ENOENT. We'll get woken every time a
	 * request completes and will retry the cancelation.
	 */
	do {
		cd.seq = atomic_inc_return(&ctx->cancel_seq);

		prepare_to_wait(&ctx->cq_wait, &wait, TASK_INTERRUPTIBLE);

		ret = __io_sync_cancel(current->io_uring, &cd, sc.fd);

		mutex_unlock(&ctx->uring_lock);
		if (ret != -EALREADY)
			break;

		ret = io_run_task_work_sig(ctx);
		if (ret < 0)
			break;
		ret = schedule_hrtimeout(&timeout, HRTIMER_MODE_ABS);
		if (!ret) {
			ret = -ETIME;
			break;
		}
		mutex_lock(&ctx->uring_lock);
	} while (1);

	finish_wait(&ctx->cq_wait, &wait);
	mutex_lock(&ctx->uring_lock);

	if (ret == -ENOENT || ret > 0)
		ret = 0;
out:
	if (file)
		fput(file);
	return ret;
}