// SPDX-License-Identifier: GPL-2.0
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/fdtable.h>
#include <linux/fsnotify.h>
#include <linux/namei.h>
#include <linux/io_uring.h>

#include <uapi/linux/io_uring.h>

#include "../fs/internal.h"

#include "io_uring.h"
#include "rsrc.h"
#include "openclose.h"
23 struct filename
*filename
;
34 struct io_fixed_install
{
39 static bool io_openat_force_async(struct io_open
*open
)
42 * Don't bother trying for O_TRUNC, O_CREAT, or O_TMPFILE open,
43 * it'll always -EAGAIN. Note that we test for __O_TMPFILE because
44 * O_TMPFILE includes O_DIRECTORY, which isn't a flag we need to force
47 return open
->how
.flags
& (O_TRUNC
| O_CREAT
| __O_TMPFILE
);
50 static int __io_openat_prep(struct io_kiocb
*req
, const struct io_uring_sqe
*sqe
)
52 struct io_open
*open
= io_kiocb_to_cmd(req
, struct io_open
);
53 const char __user
*fname
;
56 if (unlikely(sqe
->buf_index
))
58 if (unlikely(req
->flags
& REQ_F_FIXED_FILE
))
61 /* open.how should be already initialised */
62 if (!(open
->how
.flags
& O_PATH
) && force_o_largefile())
63 open
->how
.flags
|= O_LARGEFILE
;
65 open
->dfd
= READ_ONCE(sqe
->fd
);
66 fname
= u64_to_user_ptr(READ_ONCE(sqe
->addr
));
67 open
->filename
= getname(fname
);
68 if (IS_ERR(open
->filename
)) {
69 ret
= PTR_ERR(open
->filename
);
70 open
->filename
= NULL
;
74 open
->file_slot
= READ_ONCE(sqe
->file_index
);
75 if (open
->file_slot
&& (open
->how
.flags
& O_CLOEXEC
))
78 open
->nofile
= rlimit(RLIMIT_NOFILE
);
79 req
->flags
|= REQ_F_NEED_CLEANUP
;
80 if (io_openat_force_async(open
))
81 req
->flags
|= REQ_F_FORCE_ASYNC
;
85 int io_openat_prep(struct io_kiocb
*req
, const struct io_uring_sqe
*sqe
)
87 struct io_open
*open
= io_kiocb_to_cmd(req
, struct io_open
);
88 u64 mode
= READ_ONCE(sqe
->len
);
89 u64 flags
= READ_ONCE(sqe
->open_flags
);
91 open
->how
= build_open_how(flags
, mode
);
92 return __io_openat_prep(req
, sqe
);
95 int io_openat2_prep(struct io_kiocb
*req
, const struct io_uring_sqe
*sqe
)
97 struct io_open
*open
= io_kiocb_to_cmd(req
, struct io_open
);
98 struct open_how __user
*how
;
102 how
= u64_to_user_ptr(READ_ONCE(sqe
->addr2
));
103 len
= READ_ONCE(sqe
->len
);
104 if (len
< OPEN_HOW_SIZE_VER0
)
107 ret
= copy_struct_from_user(&open
->how
, sizeof(open
->how
), how
, len
);
111 return __io_openat_prep(req
, sqe
);
114 int io_openat2(struct io_kiocb
*req
, unsigned int issue_flags
)
116 struct io_open
*open
= io_kiocb_to_cmd(req
, struct io_open
);
117 struct open_flags op
;
119 bool resolve_nonblock
, nonblock_set
;
120 bool fixed
= !!open
->file_slot
;
123 ret
= build_open_flags(&open
->how
, &op
);
126 nonblock_set
= op
.open_flag
& O_NONBLOCK
;
127 resolve_nonblock
= open
->how
.resolve
& RESOLVE_CACHED
;
128 if (issue_flags
& IO_URING_F_NONBLOCK
) {
129 WARN_ON_ONCE(io_openat_force_async(open
));
130 op
.lookup_flags
|= LOOKUP_CACHED
;
131 op
.open_flag
|= O_NONBLOCK
;
135 ret
= __get_unused_fd_flags(open
->how
.flags
, open
->nofile
);
140 file
= do_filp_open(open
->dfd
, open
->filename
, &op
);
143 * We could hang on to this 'fd' on retrying, but seems like
144 * marginal gain for something that is now known to be a slower
145 * path. So just put it, and we'll get a new one when we retry.
151 /* only retry if RESOLVE_CACHED wasn't already set by application */
152 if (ret
== -EAGAIN
&&
153 (!resolve_nonblock
&& (issue_flags
& IO_URING_F_NONBLOCK
)))
158 if ((issue_flags
& IO_URING_F_NONBLOCK
) && !nonblock_set
)
159 file
->f_flags
&= ~O_NONBLOCK
;
162 fd_install(ret
, file
);
164 ret
= io_fixed_fd_install(req
, issue_flags
, file
,
167 putname(open
->filename
);
168 req
->flags
&= ~REQ_F_NEED_CLEANUP
;
171 io_req_set_res(req
, ret
, 0);
/* IORING_OP_OPENAT issue path: identical to openat2 once prep has run. */
int io_openat(struct io_kiocb *req, unsigned int issue_flags)
{
	return io_openat2(req, issue_flags);
}
180 void io_open_cleanup(struct io_kiocb
*req
)
182 struct io_open
*open
= io_kiocb_to_cmd(req
, struct io_open
);
185 putname(open
->filename
);
/*
 * Remove a fixed file from the given zero-based table offset, holding the
 * ring submit lock around the table update.
 */
int __io_close_fixed(struct io_ring_ctx *ctx, unsigned int issue_flags,
		     unsigned int offset)
{
	int ret;

	io_ring_submit_lock(ctx, issue_flags);
	ret = io_fixed_fd_remove(ctx, offset);
	io_ring_submit_unlock(ctx, issue_flags);

	return ret;
}
200 static inline int io_close_fixed(struct io_kiocb
*req
, unsigned int issue_flags
)
202 struct io_close
*close
= io_kiocb_to_cmd(req
, struct io_close
);
204 return __io_close_fixed(req
->ctx
, issue_flags
, close
->file_slot
- 1);
207 int io_close_prep(struct io_kiocb
*req
, const struct io_uring_sqe
*sqe
)
209 struct io_close
*close
= io_kiocb_to_cmd(req
, struct io_close
);
211 if (sqe
->off
|| sqe
->addr
|| sqe
->len
|| sqe
->rw_flags
|| sqe
->buf_index
)
213 if (req
->flags
& REQ_F_FIXED_FILE
)
216 close
->fd
= READ_ONCE(sqe
->fd
);
217 close
->file_slot
= READ_ONCE(sqe
->file_index
);
218 if (close
->file_slot
&& close
->fd
)
224 int io_close(struct io_kiocb
*req
, unsigned int issue_flags
)
226 struct files_struct
*files
= current
->files
;
227 struct io_close
*close
= io_kiocb_to_cmd(req
, struct io_close
);
231 if (close
->file_slot
) {
232 ret
= io_close_fixed(req
, issue_flags
);
236 spin_lock(&files
->file_lock
);
237 file
= files_lookup_fd_locked(files
, close
->fd
);
238 if (!file
|| io_is_uring_fops(file
)) {
239 spin_unlock(&files
->file_lock
);
243 /* if the file has a flush method, be safe and punt to async */
244 if (file
->f_op
->flush
&& (issue_flags
& IO_URING_F_NONBLOCK
)) {
245 spin_unlock(&files
->file_lock
);
249 file
= file_close_fd_locked(files
, close
->fd
);
250 spin_unlock(&files
->file_lock
);
254 /* No ->flush() or already async, safely close from here */
255 ret
= filp_close(file
, current
->files
);
259 io_req_set_res(req
, ret
, 0);
263 int io_install_fixed_fd_prep(struct io_kiocb
*req
, const struct io_uring_sqe
*sqe
)
265 struct io_fixed_install
*ifi
;
268 if (sqe
->off
|| sqe
->addr
|| sqe
->len
|| sqe
->buf_index
||
269 sqe
->splice_fd_in
|| sqe
->addr3
)
272 /* must be a fixed file */
273 if (!(req
->flags
& REQ_F_FIXED_FILE
))
276 flags
= READ_ONCE(sqe
->install_fd_flags
);
277 if (flags
& ~IORING_FIXED_FD_NO_CLOEXEC
)
280 /* ensure the task's creds are used when installing/receiving fds */
281 if (req
->flags
& REQ_F_CREDS
)
284 /* default to O_CLOEXEC, disable if IORING_FIXED_FD_NO_CLOEXEC is set */
285 ifi
= io_kiocb_to_cmd(req
, struct io_fixed_install
);
286 ifi
->o_flags
= O_CLOEXEC
;
287 if (flags
& IORING_FIXED_FD_NO_CLOEXEC
)
293 int io_install_fixed_fd(struct io_kiocb
*req
, unsigned int issue_flags
)
295 struct io_fixed_install
*ifi
;
298 ifi
= io_kiocb_to_cmd(req
, struct io_fixed_install
);
299 ret
= receive_fd(req
->file
, NULL
, ifi
->o_flags
);
302 io_req_set_res(req
, ret
, 0);