// SPDX-License-Identifier: GPL-2.0
/*
 *  linux/fs/file.c
 *
 *  Copyright (C) 1998-1999, Stephen Tweedie and Bill Hawes
 *
 *  Manage the dynamic fd arrays in the process files_struct.
 */

#include <linux/syscalls.h>
#include <linux/export.h>
#include <linux/fs.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/sched/signal.h>
#include <linux/slab.h>
#include <linux/file.h>
#include <linux/fdtable.h>
#include <linux/bitops.h>
#include <linux/spinlock.h>
#include <linux/rcupdate.h>
#include <linux/close_range.h>
#include <linux/file_ref.h>
#include <net/sock.h>
#include <linux/init_task.h>

#include "internal.h"

/**
 * __file_ref_put - Slowpath of file_ref_put()
 * @ref: Pointer to the reference count
 * @cnt: Current reference count
 *
 * Invoked when the reference count is outside of the valid zone.
 *
 * Return:
 *	True if this was the last reference with no future references
 *	possible. This signals the caller that it can safely schedule the
 *	object, which is protected by the reference counter, for
 *	deconstruction.
 *
 *	False if there are still active references or the put() raced
 *	with a concurrent get()/put() pair. Caller is not allowed to
 *	deconstruct the protected object.
 */
bool __file_ref_put(file_ref_t *ref, unsigned long cnt)
{
	/* Did this drop the last reference? */
	if (likely(cnt == FILE_REF_NOREF)) {
		/*
		 * Carefully try to set the reference count to FILE_REF_DEAD.
		 *
		 * This can fail if a concurrent get() operation has
		 * elevated it again or the corresponding put() even marked
		 * it dead already. Both are valid situations and do not
		 * require a retry. If this fails the caller is not
		 * allowed to deconstruct the object.
		 */
		if (!atomic_long_try_cmpxchg_release(&ref->refcnt, &cnt, FILE_REF_DEAD))
			return false;

		/*
		 * The caller can safely schedule the object for
		 * deconstruction. Provide acquire ordering.
		 */
		smp_acquire__after_ctrl_dep();
		return true;
	}

	/*
	 * If the reference count was already in the dead zone, then this
	 * put() operation is imbalanced. Warn, put the reference count back to
	 * DEAD and tell the caller to not deconstruct the object.
	 */
	if (WARN_ONCE(cnt >= FILE_REF_RELEASED, "imbalanced put on file reference count")) {
		atomic_long_set(&ref->refcnt, FILE_REF_DEAD);
		return false;
	}

	/*
	 * This is a put() operation on a saturated refcount. Restore the
	 * mean saturation value and tell the caller to not deconstruct the
	 * object.
	 */
	if (cnt > FILE_REF_MAXREF)
		atomic_long_set(&ref->refcnt, FILE_REF_SATURATED);
	return false;
}
EXPORT_SYMBOL_GPL(__file_ref_put);
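
/*
 * Illustrative sketch (not part of this file): __file_ref_put() is only the
 * slowpath; a typical owner of a file_ref_t protected object pairs the inline
 * fast paths from <linux/file_ref.h> roughly like this, where my_obj and
 * free_my_obj() are hypothetical:
 *
 *	struct my_obj *obj_get(struct my_obj *o)
 *	{
 *		return file_ref_get(&o->ref) ? o : NULL;
 *	}
 *
 *	void obj_put(struct my_obj *o)
 *	{
 *		if (file_ref_put(&o->ref))
 *			free_my_obj(o);		// last reference is gone
 *	}
 */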

unsigned int sysctl_nr_open __read_mostly = 1024*1024;
unsigned int sysctl_nr_open_min = BITS_PER_LONG;
/* our min() is unusable in constant expressions ;-/ */
#define __const_min(x, y) ((x) < (y) ? (x) : (y))
unsigned int sysctl_nr_open_max =
	__const_min(INT_MAX, ~(size_t)0/sizeof(void *)) & -BITS_PER_LONG;

static void __free_fdtable(struct fdtable *fdt)
{
	kvfree(fdt->fd);
	kvfree(fdt->open_fds);
	kfree(fdt);
}

static void free_fdtable_rcu(struct rcu_head *rcu)
{
	__free_fdtable(container_of(rcu, struct fdtable, rcu));
}

#define BITBIT_NR(nr)	BITS_TO_LONGS(BITS_TO_LONGS(nr))
#define BITBIT_SIZE(nr)	(BITBIT_NR(nr) * sizeof(long))

#define fdt_words(fdt) ((fdt)->max_fds / BITS_PER_LONG) // words in ->open_fds

/*
 * Copy 'count' fd bits from the old table to the new table and clear the extra
 * space if any. This does not copy the file pointers. Called with the files
 * spinlock held for write.
 */
static inline void copy_fd_bitmaps(struct fdtable *nfdt, struct fdtable *ofdt,
				   unsigned int copy_words)
{
	unsigned int nwords = fdt_words(nfdt);

	bitmap_copy_and_extend(nfdt->open_fds, ofdt->open_fds,
			copy_words * BITS_PER_LONG, nwords * BITS_PER_LONG);
	bitmap_copy_and_extend(nfdt->close_on_exec, ofdt->close_on_exec,
			copy_words * BITS_PER_LONG, nwords * BITS_PER_LONG);
	bitmap_copy_and_extend(nfdt->full_fds_bits, ofdt->full_fds_bits,
			copy_words, nwords);
}

/*
 * Copy all file descriptors from the old table to the new, expanded table and
 * clear the extra space. Called with the files spinlock held for write.
 */
static void copy_fdtable(struct fdtable *nfdt, struct fdtable *ofdt)
{
	size_t cpy, set;

	BUG_ON(nfdt->max_fds < ofdt->max_fds);

	cpy = ofdt->max_fds * sizeof(struct file *);
	set = (nfdt->max_fds - ofdt->max_fds) * sizeof(struct file *);
	memcpy(nfdt->fd, ofdt->fd, cpy);
	memset((char *)nfdt->fd + cpy, 0, set);

	copy_fd_bitmaps(nfdt, ofdt, fdt_words(ofdt));
}

/*
 * Note how the fdtable bitmap allocations very much have to be a multiple of
 * BITS_PER_LONG. This is not only because we walk those things in chunks of
 * 'unsigned long' in some places, but simply because that is how the Linux
 * kernel bitmaps are defined to work: they are not "bits in an array of bytes",
 * they are very much "bits in an array of unsigned long".
 */
static struct fdtable *alloc_fdtable(unsigned int slots_wanted)
{
	struct fdtable *fdt;
	unsigned int nr;
	void *data;

	/*
	 * Figure out how many fds we actually want to support in this fdtable.
	 * Allocation steps are keyed to the size of the fdarray, since it
	 * grows far faster than any of the other dynamic data. We try to fit
	 * the fdarray into comfortable page-tuned chunks: starting at 1024B
	 * and growing in powers of two from there on. Since we are called only
	 * with slots_wanted > BITS_PER_LONG (embedded instance in files->fdtab
	 * already gives BITS_PER_LONG slots), the above boils down to
	 * 1. use the smallest power of two large enough to give us that many
	 *    slots.
	 * 2. on 32bit skip 64 and 128 - the minimal capacity we want there is
	 *    256 slots (i.e. 1Kb fd array).
	 * 3. on 64bit don't skip anything, 1Kb fd array means 128 slots there
	 *    and we are never going to be asked for 64 or less.
	 */
	if (IS_ENABLED(CONFIG_32BIT) && slots_wanted < 256)
		nr = 256;
	else
		nr = roundup_pow_of_two(slots_wanted);
	/*
	 * Note that this can drive nr *below* what we had passed if sysctl_nr_open
	 * had been set lower between the check in expand_files() and here.
	 *
	 * We make sure that nr remains a multiple of BITS_PER_LONG - otherwise
	 * bitmaps handling below becomes unpleasant, to put it mildly...
	 */
	if (unlikely(nr > sysctl_nr_open)) {
		nr = round_down(sysctl_nr_open, BITS_PER_LONG);
		if (nr < slots_wanted)
			return ERR_PTR(-EMFILE);
	}

	fdt = kmalloc(sizeof(struct fdtable), GFP_KERNEL_ACCOUNT);
	if (!fdt)
		goto out;
	fdt->max_fds = nr;
	data = kvmalloc_array(nr, sizeof(struct file *), GFP_KERNEL_ACCOUNT);
	if (!data)
		goto out_fdt;
	fdt->fd = data;

	data = kvmalloc(max_t(size_t,
			      2 * nr / BITS_PER_BYTE + BITBIT_SIZE(nr), L1_CACHE_BYTES),
			      GFP_KERNEL_ACCOUNT);
	if (!data)
		goto out_arr;
	fdt->open_fds = data;
	data += nr / BITS_PER_BYTE;
	fdt->close_on_exec = data;
	data += nr / BITS_PER_BYTE;
	fdt->full_fds_bits = data;

	return fdt;

out_arr:
	kvfree(fdt->fd);
out_fdt:
	kfree(fdt);
out:
	return ERR_PTR(-ENOMEM);
}
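
/*
 * Worked example of the sizing above (illustrative only): slots_wanted = 300
 * on 64-bit rounds nr up to 512, i.e. a 4KiB ->fd array (512 * 8 bytes) plus
 * one bitmap allocation of 2 * 512 / BITS_PER_BYTE + BITBIT_SIZE(512) =
 * 128 + 8 bytes (never less than L1_CACHE_BYTES), which is then carved up
 * into ->open_fds, ->close_on_exec and ->full_fds_bits.
 */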

/*
 * Expand the file descriptor table.
 * This function will allocate a new fdtable and both fd array and fdset, of
 * the given size.
 * Return <0 error code on error; 0 on successful completion.
 * The files->file_lock should be held on entry, and will be held on exit.
 */
static int expand_fdtable(struct files_struct *files, unsigned int nr)
	__releases(files->file_lock)
	__acquires(files->file_lock)
{
	struct fdtable *new_fdt, *cur_fdt;

	spin_unlock(&files->file_lock);
	new_fdt = alloc_fdtable(nr + 1);

	/* make sure all fd_install() have seen resize_in_progress
	 * or have finished their rcu_read_lock_sched() section.
	 */
	if (atomic_read(&files->count) > 1)
		synchronize_rcu();

	spin_lock(&files->file_lock);
	if (IS_ERR(new_fdt))
		return PTR_ERR(new_fdt);
	cur_fdt = files_fdtable(files);
	BUG_ON(nr < cur_fdt->max_fds);
	copy_fdtable(new_fdt, cur_fdt);
	rcu_assign_pointer(files->fdt, new_fdt);
	if (cur_fdt != &files->fdtab)
		call_rcu(&cur_fdt->rcu, free_fdtable_rcu);
	/* coupled with smp_rmb() in fd_install() */
	smp_wmb();
	return 0;
}

/*
 * Expand files.
 * This function will expand the file structures, if the requested size exceeds
 * the current capacity and there is room for expansion.
 * Return <0 error code on error; 0 on success.
 * The files->file_lock should be held on entry, and will be held on exit.
 */
static int expand_files(struct files_struct *files, unsigned int nr)
	__releases(files->file_lock)
	__acquires(files->file_lock)
{
	struct fdtable *fdt;
	int error;

repeat:
	fdt = files_fdtable(files);

	/* Do we need to expand? */
	if (nr < fdt->max_fds)
		return 0;

	if (unlikely(files->resize_in_progress)) {
		spin_unlock(&files->file_lock);
		wait_event(files->resize_wait, !files->resize_in_progress);
		spin_lock(&files->file_lock);
		goto repeat;
	}

	/* Can we expand? */
	if (unlikely(nr >= sysctl_nr_open))
		return -EMFILE;

	/* All good, so we try */
	files->resize_in_progress = true;
	error = expand_fdtable(files, nr);
	files->resize_in_progress = false;

	wake_up_all(&files->resize_wait);
	return error;
}

static inline void __set_close_on_exec(unsigned int fd, struct fdtable *fdt,
				       bool set)
{
	if (set) {
		__set_bit(fd, fdt->close_on_exec);
	} else {
		if (test_bit(fd, fdt->close_on_exec))
			__clear_bit(fd, fdt->close_on_exec);
	}
}

static inline void __set_open_fd(unsigned int fd, struct fdtable *fdt, bool set)
{
	__set_bit(fd, fdt->open_fds);
	__set_close_on_exec(fd, fdt, set);
	fd /= BITS_PER_LONG;
	if (!~fdt->open_fds[fd])
		__set_bit(fd, fdt->full_fds_bits);
}

static inline void __clear_open_fd(unsigned int fd, struct fdtable *fdt)
{
	__clear_bit(fd, fdt->open_fds);
	fd /= BITS_PER_LONG;
	if (test_bit(fd, fdt->full_fds_bits))
		__clear_bit(fd, fdt->full_fds_bits);
}

static inline bool fd_is_open(unsigned int fd, const struct fdtable *fdt)
{
	return test_bit(fd, fdt->open_fds);
}

/*
 * Note that a sane fdtable size always has to be a multiple of
 * BITS_PER_LONG, since we have bitmaps that are sized by this.
 *
 * punch_hole is optional - when close_range() is asked to unshare
 * and close, we don't need to copy descriptors in that range, so
 * a smaller cloned descriptor table might suffice if the last
 * currently opened descriptor falls into that range.
 */
static unsigned int sane_fdtable_size(struct fdtable *fdt, struct fd_range *punch_hole)
{
	unsigned int last = find_last_bit(fdt->open_fds, fdt->max_fds);

	if (last == fdt->max_fds)
		return NR_OPEN_DEFAULT;
	if (punch_hole && punch_hole->to >= last && punch_hole->from <= last) {
		last = find_last_bit(fdt->open_fds, punch_hole->from);
		if (last == punch_hole->from)
			return NR_OPEN_DEFAULT;
	}
	return ALIGN(last + 1, BITS_PER_LONG);
}
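
/*
 * Worked example (illustrative only): with descriptors 0-2 and 700 open, a
 * plain clone would be sized to ALIGN(701, 64) = 704 slots on 64-bit. If
 * close_range(3, ~0U, CLOSE_RANGE_UNSHARE) passes punch_hole = {3, UINT_MAX},
 * descriptor 700 falls inside the hole, the search is redone below bit 3,
 * lands on descriptor 2, and ALIGN(3, 64) = 64 slots suffice.
 */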

/*
 * Allocate a new descriptor table and copy contents from the passed in
 * instance. Returns a pointer to cloned table on success, ERR_PTR()
 * on failure. For 'punch_hole' see sane_fdtable_size().
 */
struct files_struct *dup_fd(struct files_struct *oldf, struct fd_range *punch_hole)
{
	struct files_struct *newf;
	struct file **old_fds, **new_fds;
	unsigned int open_files, i;
	struct fdtable *old_fdt, *new_fdt;

	newf = kmem_cache_alloc(files_cachep, GFP_KERNEL);
	if (!newf)
		return ERR_PTR(-ENOMEM);

	atomic_set(&newf->count, 1);

	spin_lock_init(&newf->file_lock);
	newf->resize_in_progress = false;
	init_waitqueue_head(&newf->resize_wait);
	newf->next_fd = 0;
	new_fdt = &newf->fdtab;
	new_fdt->max_fds = NR_OPEN_DEFAULT;
	new_fdt->close_on_exec = newf->close_on_exec_init;
	new_fdt->open_fds = newf->open_fds_init;
	new_fdt->full_fds_bits = newf->full_fds_bits_init;
	new_fdt->fd = &newf->fd_array[0];

	spin_lock(&oldf->file_lock);
	old_fdt = files_fdtable(oldf);
	open_files = sane_fdtable_size(old_fdt, punch_hole);

	/*
	 * Check whether we need to allocate a larger fd array and fd set.
	 */
	while (unlikely(open_files > new_fdt->max_fds)) {
		spin_unlock(&oldf->file_lock);

		if (new_fdt != &newf->fdtab)
			__free_fdtable(new_fdt);

		new_fdt = alloc_fdtable(open_files);
		if (IS_ERR(new_fdt)) {
			kmem_cache_free(files_cachep, newf);
			return ERR_CAST(new_fdt);
		}

		/*
		 * Reacquire the oldf lock and a pointer to its fd table -
		 * it may have grown a new, bigger fd table in the meantime,
		 * so we need the latest pointer.
		 */
		spin_lock(&oldf->file_lock);
		old_fdt = files_fdtable(oldf);
		open_files = sane_fdtable_size(old_fdt, punch_hole);
	}

	copy_fd_bitmaps(new_fdt, old_fdt, open_files / BITS_PER_LONG);

	old_fds = old_fdt->fd;
	new_fds = new_fdt->fd;

	for (i = open_files; i != 0; i--) {
		struct file *f = *old_fds++;
		if (f) {
			get_file(f);
		} else {
			/*
			 * The fd may be claimed in the fd bitmap but not yet
			 * instantiated in the files array if a sibling thread
			 * is partway through open(). So make sure that this
			 * fd is available to the new process.
			 */
			__clear_open_fd(open_files - i, new_fdt);
		}
		rcu_assign_pointer(*new_fds++, f);
	}
	spin_unlock(&oldf->file_lock);

	/* clear the remainder */
	memset(new_fds, 0, (new_fdt->max_fds - open_files) * sizeof(struct file *));

	rcu_assign_pointer(newf->fdt, new_fdt);

	return newf;
}
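
/*
 * Usage sketch (illustrative): dup_fd() backs both fork() and the unsharing
 * path of close_range(). A caller that wants a full private copy passes no
 * hole, roughly:
 *
 *	newf = dup_fd(current->files, NULL);
 *	if (IS_ERR(newf))
 *		return PTR_ERR(newf);
 *	tsk->files = newf;
 */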

static struct fdtable *close_files(struct files_struct * files)
{
	/*
	 * It is safe to dereference the fd table without RCU or
	 * ->file_lock because this is the last reference to the
	 * files structure.
	 */
	struct fdtable *fdt = rcu_dereference_raw(files->fdt);
	unsigned int i, j = 0;

	for (;;) {
		unsigned long set;
		i = j * BITS_PER_LONG;
		if (i >= fdt->max_fds)
			break;
		set = fdt->open_fds[j++];
		while (set) {
			if (set & 1) {
				struct file *file = fdt->fd[i];
				if (file) {
					filp_close(file, files);
					cond_resched();
				}
			}
			i++;
			set >>= 1;
		}
	}

	return fdt;
}

void put_files_struct(struct files_struct *files)
{
	if (atomic_dec_and_test(&files->count)) {
		struct fdtable *fdt = close_files(files);

		/* free the arrays if they are not embedded */
		if (fdt != &files->fdtab)
			__free_fdtable(fdt);
		kmem_cache_free(files_cachep, files);
	}
}

void exit_files(struct task_struct *tsk)
{
	struct files_struct * files = tsk->files;

	if (files) {
		task_lock(tsk);
		tsk->files = NULL;
		task_unlock(tsk);
		put_files_struct(files);
	}
}

struct files_struct init_files = {
	.count		= ATOMIC_INIT(1),
	.fdt		= &init_files.fdtab,
	.fdtab		= {
		.max_fds	= NR_OPEN_DEFAULT,
		.fd		= &init_files.fd_array[0],
		.close_on_exec	= init_files.close_on_exec_init,
		.open_fds	= init_files.open_fds_init,
		.full_fds_bits	= init_files.full_fds_bits_init,
	},
	.file_lock	= __SPIN_LOCK_UNLOCKED(init_files.file_lock),
	.resize_wait	= __WAIT_QUEUE_HEAD_INITIALIZER(init_files.resize_wait),
};

static unsigned int find_next_fd(struct fdtable *fdt, unsigned int start)
{
	unsigned int maxfd = fdt->max_fds; /* always multiple of BITS_PER_LONG */
	unsigned int maxbit = maxfd / BITS_PER_LONG;
	unsigned int bitbit = start / BITS_PER_LONG;
	unsigned int bit;

	/*
	 * Try to avoid looking at the second level bitmap
	 */
	bit = find_next_zero_bit(&fdt->open_fds[bitbit], BITS_PER_LONG,
				 start & (BITS_PER_LONG - 1));
	if (bit < BITS_PER_LONG)
		return bit + bitbit * BITS_PER_LONG;

	bitbit = find_next_zero_bit(fdt->full_fds_bits, maxbit, bitbit) * BITS_PER_LONG;
	if (bitbit >= maxfd)
		return maxfd;
	if (bitbit > start)
		start = bitbit;
	return find_next_zero_bit(fdt->open_fds, maxfd, start);
}
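
/*
 * Worked example (illustrative only): with max_fds = 128, descriptors 0-63
 * open and descriptor 64 free, a search starting at 10 scans the word holding
 * bit 10, finds no zero bit, consults ->full_fds_bits to skip word 0 (marked
 * full), and resumes at bit 64, which it returns.
 */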

/*
 * allocate a file descriptor, mark it busy.
 */
static int alloc_fd(unsigned start, unsigned end, unsigned flags)
{
	struct files_struct *files = current->files;
	unsigned int fd;
	int error;
	struct fdtable *fdt;

	spin_lock(&files->file_lock);
repeat:
	fdt = files_fdtable(files);
	fd = start;
	if (fd < files->next_fd)
		fd = files->next_fd;

	if (likely(fd < fdt->max_fds))
		fd = find_next_fd(fdt, fd);

	/*
	 * N.B. For clone tasks sharing a files structure, this test
	 * will limit the total number of files that can be opened.
	 */
	error = -EMFILE;
	if (unlikely(fd >= end))
		goto out;

	if (unlikely(fd >= fdt->max_fds)) {
		error = expand_files(files, fd);
		if (error < 0)
			goto out;

		goto repeat;
	}

	if (start <= files->next_fd)
		files->next_fd = fd + 1;

	__set_open_fd(fd, fdt, flags & O_CLOEXEC);
	error = fd;

out:
	spin_unlock(&files->file_lock);
	return error;
}

int __get_unused_fd_flags(unsigned flags, unsigned long nofile)
{
	return alloc_fd(0, nofile, flags);
}

int get_unused_fd_flags(unsigned flags)
{
	return __get_unused_fd_flags(flags, rlimit(RLIMIT_NOFILE));
}
EXPORT_SYMBOL(get_unused_fd_flags);
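
/*
 * Usage sketch (illustrative): callers reserve a descriptor first and only
 * publish the file once it is fully set up, undoing the reservation on
 * failure. make_my_file() is a hypothetical constructor:
 *
 *	int fd = get_unused_fd_flags(O_CLOEXEC);
 *	if (fd < 0)
 *		return fd;
 *	file = make_my_file();
 *	if (IS_ERR(file)) {
 *		put_unused_fd(fd);
 *		return PTR_ERR(file);
 *	}
 *	fd_install(fd, file);
 *	return fd;
 */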

static void __put_unused_fd(struct files_struct *files, unsigned int fd)
{
	struct fdtable *fdt = files_fdtable(files);
	__clear_open_fd(fd, fdt);
	if (fd < files->next_fd)
		files->next_fd = fd;
}

void put_unused_fd(unsigned int fd)
{
	struct files_struct *files = current->files;
	spin_lock(&files->file_lock);
	__put_unused_fd(files, fd);
	spin_unlock(&files->file_lock);
}

EXPORT_SYMBOL(put_unused_fd);

/*
 * Install a file pointer in the fd array.
 *
 * The VFS is full of places where we drop the files lock between
 * setting the open_fds bitmap and installing the file in the file
 * array. At any such point, we are vulnerable to a dup2() race
 * installing a file in the array before us. We need to detect this and
 * fput() the struct file we are about to overwrite in this case.
 *
 * It should never happen - if we allow dup2() to do it, _really_ bad things
 * will follow.
 *
 * This consumes the "file" refcount, so callers should treat it
 * as if they had called fput(file).
 */
void fd_install(unsigned int fd, struct file *file)
{
	struct files_struct *files = current->files;
	struct fdtable *fdt;

	if (WARN_ON_ONCE(unlikely(file->f_mode & FMODE_BACKING)))
		return;

	rcu_read_lock_sched();

	if (unlikely(files->resize_in_progress)) {
		rcu_read_unlock_sched();
		spin_lock(&files->file_lock);
		fdt = files_fdtable(files);
		WARN_ON(fdt->fd[fd] != NULL);
		rcu_assign_pointer(fdt->fd[fd], file);
		spin_unlock(&files->file_lock);
		return;
	}
	/* coupled with smp_wmb() in expand_fdtable() */
	smp_rmb();
	fdt = rcu_dereference_sched(files->fdt);
	BUG_ON(fdt->fd[fd] != NULL);
	rcu_assign_pointer(fdt->fd[fd], file);
	rcu_read_unlock_sched();
}

EXPORT_SYMBOL(fd_install);

/**
 * file_close_fd_locked - return file associated with fd
 * @files: file struct to retrieve file from
 * @fd: file descriptor to retrieve file for
 *
 * Doesn't take a separate reference count.
 *
 * Context: files_lock must be held.
 *
 * Returns: The file associated with @fd (NULL if @fd is not open)
 */
struct file *file_close_fd_locked(struct files_struct *files, unsigned fd)
{
	struct fdtable *fdt = files_fdtable(files);
	struct file *file;

	lockdep_assert_held(&files->file_lock);

	if (fd >= fdt->max_fds)
		return NULL;

	fd = array_index_nospec(fd, fdt->max_fds);
	file = fdt->fd[fd];
	if (file) {
		rcu_assign_pointer(fdt->fd[fd], NULL);
		__put_unused_fd(files, fd);
	}
	return file;
}

int close_fd(unsigned fd)
{
	struct files_struct *files = current->files;
	struct file *file;

	spin_lock(&files->file_lock);
	file = file_close_fd_locked(files, fd);
	spin_unlock(&files->file_lock);
	if (!file)
		return -EBADF;

	return filp_close(file, files);
}
EXPORT_SYMBOL(close_fd);

/**
 * last_fd - return last valid index into fd table
 * @fdt: File descriptor table.
 *
 * Context: Either rcu read lock or files_lock must be held.
 *
 * Returns: Last valid index into fdtable.
 */
static inline unsigned last_fd(struct fdtable *fdt)
{
	return fdt->max_fds - 1;
}

static inline void __range_cloexec(struct files_struct *cur_fds,
				   unsigned int fd, unsigned int max_fd)
{
	struct fdtable *fdt;

	/* make sure we're using the correct maximum value */
	spin_lock(&cur_fds->file_lock);
	fdt = files_fdtable(cur_fds);
	max_fd = min(last_fd(fdt), max_fd);
	if (fd <= max_fd)
		bitmap_set(fdt->close_on_exec, fd, max_fd - fd + 1);
	spin_unlock(&cur_fds->file_lock);
}

static inline void __range_close(struct files_struct *files, unsigned int fd,
				 unsigned int max_fd)
{
	struct file *file;
	unsigned n;

	spin_lock(&files->file_lock);
	n = last_fd(files_fdtable(files));
	max_fd = min(max_fd, n);

	for (; fd <= max_fd; fd++) {
		file = file_close_fd_locked(files, fd);
		if (file) {
			spin_unlock(&files->file_lock);
			filp_close(file, files);
			cond_resched();
			spin_lock(&files->file_lock);
		} else if (need_resched()) {
			spin_unlock(&files->file_lock);
			cond_resched();
			spin_lock(&files->file_lock);
		}
	}
	spin_unlock(&files->file_lock);
}

/**
 * sys_close_range() - Close all file descriptors in a given range.
 *
 * @fd:     starting file descriptor to close
 * @max_fd: last file descriptor to close
 * @flags:  CLOSE_RANGE flags.
 *
 * This closes a range of file descriptors. All file descriptors
 * from @fd up to and including @max_fd are closed.
 * Errors from closing a given file descriptor are currently ignored.
 */
SYSCALL_DEFINE3(close_range, unsigned int, fd, unsigned int, max_fd,
		unsigned int, flags)
{
	struct task_struct *me = current;
	struct files_struct *cur_fds = me->files, *fds = NULL;

	if (flags & ~(CLOSE_RANGE_UNSHARE | CLOSE_RANGE_CLOEXEC))
		return -EINVAL;

	if (fd > max_fd)
		return -EINVAL;

	if ((flags & CLOSE_RANGE_UNSHARE) && atomic_read(&cur_fds->count) > 1) {
		struct fd_range range = {fd, max_fd}, *punch_hole = &range;

		/*
		 * If the caller requested all fds to be made cloexec we always
		 * copy all of the file descriptors since they still want to
		 * use them.
		 */
		if (flags & CLOSE_RANGE_CLOEXEC)
			punch_hole = NULL;

		fds = dup_fd(cur_fds, punch_hole);
		if (IS_ERR(fds))
			return PTR_ERR(fds);
		/*
		 * We used to share our file descriptor table, and have now
		 * created a private one, make sure we're using it below.
		 */
		swap(cur_fds, fds);
	}

	if (flags & CLOSE_RANGE_CLOEXEC)
		__range_cloexec(cur_fds, fd, max_fd);
	else
		__range_close(cur_fds, fd, max_fd);

	if (fds) {
		/*
		 * We're done closing the files we were supposed to. Time to install
		 * the new file descriptor table and drop the old one.
		 */
		task_lock(me);
		me->files = cur_fds;
		task_unlock(me);
		put_files_struct(fds);
	}

	return 0;
}
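
/*
 * Userspace sketch (illustrative): closing every inherited descriptor above
 * stderr, optionally unsharing the descriptor table first so that threads
 * still sharing it are unaffected:
 *
 *	close_range(3, ~0U, 0);
 *	close_range(3, ~0U, CLOSE_RANGE_UNSHARE);
 */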

/**
 * file_close_fd - return file associated with fd
 * @fd: file descriptor to retrieve file for
 *
 * Doesn't take a separate reference count.
 *
 * Returns: The file associated with @fd (NULL if @fd is not open)
 */
struct file *file_close_fd(unsigned int fd)
{
	struct files_struct *files = current->files;
	struct file *file;

	spin_lock(&files->file_lock);
	file = file_close_fd_locked(files, fd);
	spin_unlock(&files->file_lock);

	return file;
}

void do_close_on_exec(struct files_struct *files)
{
	unsigned i;
	struct fdtable *fdt;

	/* exec unshares first */
	spin_lock(&files->file_lock);
	for (i = 0; ; i++) {
		unsigned long set;
		unsigned fd = i * BITS_PER_LONG;
		fdt = files_fdtable(files);
		if (fd >= fdt->max_fds)
			break;
		set = fdt->close_on_exec[i];
		if (!set)
			continue;
		fdt->close_on_exec[i] = 0;
		for ( ; set ; fd++, set >>= 1) {
			struct file *file;
			if (!(set & 1))
				continue;
			file = fdt->fd[fd];
			if (!file)
				continue;
			rcu_assign_pointer(fdt->fd[fd], NULL);
			__put_unused_fd(files, fd);
			spin_unlock(&files->file_lock);
			filp_close(file, files);
			cond_resched();
			spin_lock(&files->file_lock);
		}
	}
	spin_unlock(&files->file_lock);
}

static struct file *__get_file_rcu(struct file __rcu **f)
{
	struct file __rcu *file;
	struct file __rcu *file_reloaded;
	struct file __rcu *file_reloaded_cmp;

	file = rcu_dereference_raw(*f);
	if (!file)
		return NULL;

	if (unlikely(!file_ref_get(&file->f_ref)))
		return ERR_PTR(-EAGAIN);

	file_reloaded = rcu_dereference_raw(*f);

	/*
	 * Ensure that all accesses have a dependency on the load from
	 * rcu_dereference_raw() above so we get correct ordering
	 * between reuse/allocation and the pointer check below.
	 */
	file_reloaded_cmp = file_reloaded;
	OPTIMIZER_HIDE_VAR(file_reloaded_cmp);

	/*
	 * file_ref_get() above provided a full memory barrier when we
	 * acquired a reference.
	 *
	 * This is paired with the write barrier from assigning to the
	 * __rcu protected file pointer so that if that pointer still
	 * matches the current file, we know we have successfully
	 * acquired a reference to the right file.
	 *
	 * If the pointers don't match the file has been reallocated by
	 * SLAB_TYPESAFE_BY_RCU.
	 */
	if (file == file_reloaded_cmp)
		return file_reloaded;

	fput(file);
	return ERR_PTR(-EAGAIN);
}

/**
 * get_file_rcu - try to get a reference to a file under rcu
 * @f: the file to get a reference on
 *
 * This function tries to get a reference on @f carefully verifying that
 * @f hasn't been reused.
 *
 * This function should rarely have to be used and only by users who
 * understand the implications of SLAB_TYPESAFE_BY_RCU. Try to avoid it.
 *
 * Return: Returns @f with the reference count increased or NULL.
 */
struct file *get_file_rcu(struct file __rcu **f)
{
	for (;;) {
		struct file __rcu *file;

		file = __get_file_rcu(f);
		if (!IS_ERR(file))
			return file;
	}
}
EXPORT_SYMBOL_GPL(get_file_rcu);

/**
 * get_file_active - try to get a reference to a file
 * @f: the file to get a reference on
 *
 * In contrast to get_file_rcu() the pointer itself isn't part of the
 * reference counting.
 *
 * This function should rarely have to be used and only by users who
 * understand the implications of SLAB_TYPESAFE_BY_RCU. Try to avoid it.
 *
 * Return: Returns @f with the reference count increased or NULL.
 */
struct file *get_file_active(struct file **f)
{
	struct file __rcu *file;

	rcu_read_lock();
	file = __get_file_rcu(f);
	rcu_read_unlock();
	if (IS_ERR(file))
		file = NULL;
	return file;
}
EXPORT_SYMBOL_GPL(get_file_active);
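
/*
 * Usage sketch (illustrative): a holder of a struct file pointer that may be
 * cleared or replaced concurrently (ctx->file is hypothetical) revalidates it
 * like this:
 *
 *	struct file *f = get_file_active(&ctx->file);
 *	if (!f)
 *		return -ENOENT;		// slot emptied or file went away
 *	...
 *	fput(f);
 */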

static inline struct file *__fget_files_rcu(struct files_struct *files,
	unsigned int fd, fmode_t mask)
{
	for (;;) {
		struct file *file;
		struct fdtable *fdt = rcu_dereference_raw(files->fdt);
		struct file __rcu **fdentry;
		unsigned long nospec_mask;

		/* Mask is a 0 for invalid fd's, ~0 for valid ones */
		nospec_mask = array_index_mask_nospec(fd, fdt->max_fds);

		/*
		 * fdentry points to the 'fd' offset, or fdt->fd[0].
		 * Loading from fdt->fd[0] is always safe, because the
		 * array always exists.
		 */
		fdentry = fdt->fd + (fd & nospec_mask);

		/* Do the load, then mask any invalid result */
		file = rcu_dereference_raw(*fdentry);
		file = (void *)(nospec_mask & (unsigned long)file);
		if (unlikely(!file))
			return NULL;

		/*
		 * Ok, we have a file pointer that was valid at
		 * some point, but it might have become stale since.
		 *
		 * We need to confirm it by incrementing the refcount
		 * and then check the lookup again.
		 *
		 * file_ref_get() gives us a full memory barrier. We
		 * only really need an 'acquire' one to protect the
		 * loads below, but we don't have that.
		 */
		if (unlikely(!file_ref_get(&file->f_ref)))
			continue;

		/*
		 * Such a race can take two forms:
		 *
		 *  (a) the file ref already went down to zero and the
		 *      file hasn't been reused yet or the file count
		 *      isn't zero but the file has already been reused.
		 *
		 *  (b) the file table entry has changed under us.
		 *      Note that we don't need to re-check the 'fdt->fd'
		 *      pointer having changed, because it always goes
		 *      hand-in-hand with 'fdt'.
		 *
		 * If so, we need to put our ref and try again.
		 */
		if (unlikely(file != rcu_dereference_raw(*fdentry)) ||
		    unlikely(rcu_dereference_raw(files->fdt) != fdt)) {
			fput(file);
			continue;
		}

		/*
		 * This isn't the file we're looking for or we're not
		 * allowed to get a reference to it.
		 */
		if (unlikely(file->f_mode & mask)) {
			fput(file);
			return NULL;
		}

		/*
		 * Ok, we have a ref to the file, and checked that it
		 * still exists.
		 */
		return file;
	}
}

static struct file *__fget_files(struct files_struct *files, unsigned int fd,
				 fmode_t mask)
{
	struct file *file;

	rcu_read_lock();
	file = __fget_files_rcu(files, fd, mask);
	rcu_read_unlock();

	return file;
}

static inline struct file *__fget(unsigned int fd, fmode_t mask)
{
	return __fget_files(current->files, fd, mask);
}

struct file *fget(unsigned int fd)
{
	return __fget(fd, FMODE_PATH);
}
EXPORT_SYMBOL(fget);

struct file *fget_raw(unsigned int fd)
{
	return __fget(fd, 0);
}
EXPORT_SYMBOL(fget_raw);

struct file *fget_task(struct task_struct *task, unsigned int fd)
{
	struct file *file = NULL;

	task_lock(task);
	if (task->files)
		file = __fget_files(task->files, fd, 0);
	task_unlock(task);

	return file;
}

struct file *fget_task_next(struct task_struct *task, unsigned int *ret_fd)
{
	/* Must be called with rcu_read_lock held */
	struct files_struct *files;
	unsigned int fd = *ret_fd;
	struct file *file = NULL;

	task_lock(task);
	files = task->files;
	if (files) {
		rcu_read_lock();
		for (; fd < files_fdtable(files)->max_fds; fd++) {
			file = __fget_files_rcu(files, fd, 0);
			if (file)
				break;
		}
		rcu_read_unlock();
	}
	task_unlock(task);
	*ret_fd = fd;
	return file;
}
EXPORT_SYMBOL(fget_task_next);

/*
 * Lightweight file lookup - no refcnt increment if fd table isn't shared.
 *
 * You can use this instead of fget if you satisfy all of the following
 * conditions:
 * 1) You must call fput_light before exiting the syscall and returning control
 *    to userspace (i.e. you cannot remember the returned struct file * after
 *    returning to userspace).
 * 2) You must not call filp_close on the returned struct file * in between
 *    calls to fget_light and fput_light.
 * 3) You must not clone the current task in between the calls to fget_light
 *    and fput_light.
 *
 * The fput_needed flag returned by fget_light should be passed to the
 * corresponding fput_light.
 *
 * (As an exception to rule 2, you can call filp_close between fget_light and
 * fput_light provided that you capture a real refcount with get_file before
 * the call to filp_close, and ensure that this real refcount is fput *after*
 * the fput_light call.)
 *
 * See also the documentation in rust/kernel/file.rs.
 */
static inline struct fd __fget_light(unsigned int fd, fmode_t mask)
{
	struct files_struct *files = current->files;
	struct file *file;

	/*
	 * If another thread is concurrently calling close_fd() followed
	 * by put_files_struct(), we must not observe the old table
	 * entry combined with the new refcount - otherwise we could
	 * return a file that is concurrently being freed.
	 *
	 * atomic_read_acquire() pairs with atomic_dec_and_test() in
	 * put_files_struct().
	 */
	if (likely(atomic_read_acquire(&files->count) == 1)) {
		file = files_lookup_fd_raw(files, fd);
		if (!file || unlikely(file->f_mode & mask))
			return EMPTY_FD;
		return BORROWED_FD(file);
	} else {
		file = __fget_files(files, fd, mask);
		if (!file)
			return EMPTY_FD;
		return CLONED_FD(file);
	}
}

struct fd fdget(unsigned int fd)
{
	return __fget_light(fd, FMODE_PATH);
}
EXPORT_SYMBOL(fdget);

struct fd fdget_raw(unsigned int fd)
{
	return __fget_light(fd, 0);
}
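
/*
 * Usage sketch (illustrative): the common syscall pattern around these
 * lightweight lookups; do_something() stands in for the real operation:
 *
 *	struct fd f = fdget(fd);
 *	if (fd_empty(f))
 *		return -EBADF;
 *	ret = do_something(fd_file(f));
 *	fdput(f);
 *	return ret;
 */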

/*
 * Try to avoid f_pos locking. We only need it if the
 * file is marked for FMODE_ATOMIC_POS, and it can be
 * accessed multiple ways.
 *
 * Always do it for directories, because pidfd_getfd()
 * can make a file accessible even if it otherwise would
 * not be, and for directories this is a correctness
 * issue, not a "POSIX requirement".
 */
static inline bool file_needs_f_pos_lock(struct file *file)
{
	return (file->f_mode & FMODE_ATOMIC_POS) &&
		(file_count(file) > 1 || file->f_op->iterate_shared);
}

struct fd fdget_pos(unsigned int fd)
{
	struct fd f = fdget(fd);
	struct file *file = fd_file(f);

	if (file && file_needs_f_pos_lock(file)) {
		f.word |= FDPUT_POS_UNLOCK;
		mutex_lock(&file->f_pos_lock);
	}
	return f;
}

void __f_unlock_pos(struct file *f)
{
	mutex_unlock(&f->f_pos_lock);
}

/*
 * We only lock f_pos if we have threads or if the file might be
 * shared with another process. In both cases we'll have an elevated
 * file count (done either by fdget() or by fork()).
 */

void set_close_on_exec(unsigned int fd, int flag)
{
	struct files_struct *files = current->files;
	spin_lock(&files->file_lock);
	__set_close_on_exec(fd, files_fdtable(files), flag);
	spin_unlock(&files->file_lock);
}

bool get_close_on_exec(unsigned int fd)
{
	bool res;
	rcu_read_lock();
	res = close_on_exec(fd, current->files);
	rcu_read_unlock();
	return res;
}

static int do_dup2(struct files_struct *files,
	struct file *file, unsigned fd, unsigned flags)
__releases(&files->file_lock)
{
	struct file *tofree;
	struct fdtable *fdt;

	/*
	 * We need to detect attempts to do dup2() over allocated but still
	 * not finished descriptor.
	 *
	 * POSIX is silent on the issue, we return -EBUSY.
	 */
	fdt = files_fdtable(files);
	fd = array_index_nospec(fd, fdt->max_fds);
	tofree = fdt->fd[fd];
	if (!tofree && fd_is_open(fd, fdt))
		goto Ebusy;
	get_file(file);
	rcu_assign_pointer(fdt->fd[fd], file);
	__set_open_fd(fd, fdt, flags & O_CLOEXEC);
	spin_unlock(&files->file_lock);

	if (tofree)
		filp_close(tofree, files);

	return fd;

Ebusy:
	spin_unlock(&files->file_lock);
	return -EBUSY;
}

int replace_fd(unsigned fd, struct file *file, unsigned flags)
{
	int err;
	struct files_struct *files = current->files;

	if (!file)
		return close_fd(fd);

	if (fd >= rlimit(RLIMIT_NOFILE))
		return -EBADF;

	spin_lock(&files->file_lock);
	err = expand_files(files, fd);
	if (unlikely(err < 0))
		goto out_unlock;
	return do_dup2(files, file, fd, flags);

out_unlock:
	spin_unlock(&files->file_lock);
	return err;
}

/**
 * receive_fd() - Install received file into file descriptor table
 * @file: struct file that was received from another process
 * @ufd: __user pointer to write new fd number to
 * @o_flags: the O_* flags to apply to the new fd entry
 *
 * Installs a received file into the file descriptor table, with appropriate
 * checks and count updates. Optionally writes the fd number to userspace, if
 * @ufd is non-NULL.
 *
 * This helper handles its own reference counting of the incoming
 * struct file.
 *
 * Returns the newly installed fd or a negative error code on failure.
 */
int receive_fd(struct file *file, int __user *ufd, unsigned int o_flags)
{
	int new_fd;
	int error;

	error = security_file_receive(file);
	if (error)
		return error;

	new_fd = get_unused_fd_flags(o_flags);
	if (new_fd < 0)
		return new_fd;

	if (ufd) {
		error = put_user(new_fd, ufd);
		if (error) {
			put_unused_fd(new_fd);
			return error;
		}
	}

	fd_install(new_fd, get_file(file));
	__receive_sock(file);
	return new_fd;
}
EXPORT_SYMBOL_GPL(receive_fd);
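
/*
 * Usage sketch (illustrative): a subsystem handed a struct file from another
 * task installs it and reports the new descriptor number back through a
 * user pointer (uarg->fd is hypothetical):
 *
 *	fd = receive_fd(file, &uarg->fd, O_CLOEXEC);
 *	if (fd < 0)
 *		return fd;
 */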

int receive_fd_replace(int new_fd, struct file *file, unsigned int o_flags)
{
	int error;

	error = security_file_receive(file);
	if (error)
		return error;
	error = replace_fd(new_fd, file, o_flags);
	if (error)
		return error;
	__receive_sock(file);
	return new_fd;
}

static int ksys_dup3(unsigned int oldfd, unsigned int newfd, int flags)
{
	int err = -EBADF;
	struct file *file;
	struct files_struct *files = current->files;

	if ((flags & ~O_CLOEXEC) != 0)
		return -EINVAL;

	if (unlikely(oldfd == newfd))
		return -EINVAL;

	if (newfd >= rlimit(RLIMIT_NOFILE))
		return -EBADF;

	spin_lock(&files->file_lock);
	err = expand_files(files, newfd);
	file = files_lookup_fd_locked(files, oldfd);
	if (unlikely(!file))
		goto Ebadf;
	if (unlikely(err < 0)) {
		if (err == -EMFILE)
			goto Ebadf;
		goto out_unlock;
	}
	return do_dup2(files, file, newfd, flags);

Ebadf:
	err = -EBADF;
out_unlock:
	spin_unlock(&files->file_lock);
	return err;
}

SYSCALL_DEFINE3(dup3, unsigned int, oldfd, unsigned int, newfd, int, flags)
{
	return ksys_dup3(oldfd, newfd, flags);
}

SYSCALL_DEFINE2(dup2, unsigned int, oldfd, unsigned int, newfd)
{
	if (unlikely(newfd == oldfd)) { /* corner case */
		struct files_struct *files = current->files;
		struct file *f;
		int retval = oldfd;

		rcu_read_lock();
		f = __fget_files_rcu(files, oldfd, 0);
		if (!f)
			retval = -EBADF;
		rcu_read_unlock();
		if (f)
			fput(f);
		return retval;
	}
	return ksys_dup3(oldfd, newfd, 0);
}

SYSCALL_DEFINE1(dup, unsigned int, fildes)
{
	int ret = -EBADF;
	struct file *file = fget_raw(fildes);

	if (file) {
		ret = get_unused_fd_flags(0);
		if (ret >= 0)
			fd_install(ret, file);
		else
			fput(file);
	}
	return ret;
}

int f_dupfd(unsigned int from, struct file *file, unsigned flags)
{
	unsigned long nofile = rlimit(RLIMIT_NOFILE);
	int err;
	if (from >= nofile)
		return -EINVAL;
	err = alloc_fd(from, nofile, flags);
	if (err >= 0) {
		get_file(file);
		fd_install(err, file);
	}
	return err;
}

int iterate_fd(struct files_struct *files, unsigned n,
		int (*f)(const void *, struct file *, unsigned),
		const void *p)
{
	struct fdtable *fdt;
	int res = 0;
	if (!files)
		return 0;
	spin_lock(&files->file_lock);
	for (fdt = files_fdtable(files); n < fdt->max_fds; n++) {
		struct file *file;
		file = rcu_dereference_check_fdtable(files, fdt->fd[n]);
		if (!file)
			continue;
		res = f(p, file, n);
		if (res)
			break;
	}
	spin_unlock(&files->file_lock);
	return res;
}
EXPORT_SYMBOL(iterate_fd);
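
/*
 * Usage sketch (illustrative): iterate_fd() walks a task's descriptors under
 * ->file_lock and stops at the first non-zero callback return. A hypothetical
 * "find the fd for this file" helper could look like:
 *
 *	static int match_file(const void *p, struct file *file, unsigned fd)
 *	{
 *		return file == p ? fd + 1 : 0;	// +1 so fd 0 still stops the walk
 *	}
 *
 *	fd = iterate_fd(files, 0, match_file, my_file) - 1;
 */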