// SPDX-License-Identifier: GPL-2.0
/*
 *  linux/fs/file.c
 *
 *  Copyright (C) 1998-1999, Stephen Tweedie and Bill Hawes
 *
 *  Manage the dynamic fd arrays in the process files_struct.
 */

#include <linux/syscalls.h>
#include <linux/export.h>
#include <linux/fs.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/sched/signal.h>
#include <linux/slab.h>
#include <linux/file.h>
#include <linux/fdtable.h>
#include <linux/bitops.h>
#include <linux/spinlock.h>
#include <linux/rcupdate.h>
#include <linux/close_range.h>
#include <linux/file_ref.h>
#include <net/sock.h>

#include "internal.h"

/**
 * __file_ref_put - Slowpath of file_ref_put()
 * @ref:	Pointer to the reference count
 * @cnt:	Current reference count
 *
 * Invoked when the reference count is outside of the valid zone.
 *
 * Return:
 *	True if this was the last reference with no future references
 *	possible. This signals the caller that it can safely schedule the
 *	object, which is protected by the reference counter, for
 *	deconstruction.
 *
 *	False if there are still active references or the put() raced
 *	with a concurrent get()/put() pair. Caller is not allowed to
 *	deconstruct the protected object.
 */
bool __file_ref_put(file_ref_t *ref, unsigned long cnt)
{
	/* Did this drop the last reference? */
	if (likely(cnt == FILE_REF_NOREF)) {
		/*
		 * Carefully try to set the reference count to FILE_REF_DEAD.
		 *
		 * This can fail if a concurrent get() operation has
		 * elevated it again or the corresponding put() even marked
		 * it dead already. Both are valid situations and do not
		 * require a retry. If this fails the caller is not
		 * allowed to deconstruct the object.
		 */
		if (!atomic_long_try_cmpxchg_release(&ref->refcnt, &cnt, FILE_REF_DEAD))
			return false;

		/*
		 * The caller can safely schedule the object for
		 * deconstruction. Provide acquire ordering.
		 */
		smp_acquire__after_ctrl_dep();
		return true;
	}

	/*
	 * If the reference count was already in the dead zone, then this
	 * put() operation is imbalanced. Warn, put the reference count back to
	 * DEAD and tell the caller to not deconstruct the object.
	 */
	if (WARN_ONCE(cnt >= FILE_REF_RELEASED, "imbalanced put on file reference count")) {
		atomic_long_set(&ref->refcnt, FILE_REF_DEAD);
		return false;
	}

	/*
	 * This is a put() operation on a saturated refcount. Restore the
	 * mean saturation value and tell the caller to not deconstruct the
	 * object.
	 */
	if (cnt > FILE_REF_MAXREF)
		atomic_long_set(&ref->refcnt, FILE_REF_SATURATED);
	return false;
}
EXPORT_SYMBOL_GPL(__file_ref_put);

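/*
 * Example (illustrative sketch, not part of this file): callers normally go
 * through file_ref_put(), which handles the fastpath inline and only drops
 * into __file_ref_put() outside the valid zone. Assuming a hypothetical
 * object with an embedded file_ref_t and a free_obj() destructor, the put
 * side looks like:
 *
 *	if (file_ref_put(&obj->ref))
 *		free_obj(obj);	// last possible reference, teardown is safe
 */
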
unsigned int sysctl_nr_open __read_mostly = 1024*1024;
unsigned int sysctl_nr_open_min = BITS_PER_LONG;
/* our min() is unusable in constant expressions ;-/ */
#define __const_min(x, y) ((x) < (y) ? (x) : (y))
unsigned int sysctl_nr_open_max =
	__const_min(INT_MAX, ~(size_t)0/sizeof(void *)) & -BITS_PER_LONG;

static void __free_fdtable(struct fdtable *fdt)
{
	kvfree(fdt->fd);
	kvfree(fdt->open_fds);
	kfree(fdt);
}

static void free_fdtable_rcu(struct rcu_head *rcu)
{
	__free_fdtable(container_of(rcu, struct fdtable, rcu));
}

#define BITBIT_NR(nr)	BITS_TO_LONGS(BITS_TO_LONGS(nr))
#define BITBIT_SIZE(nr)	(BITBIT_NR(nr) * sizeof(long))

#define fdt_words(fdt) ((fdt)->max_fds / BITS_PER_LONG) // words in ->open_fds

/*
 * Copy 'count' fd bits from the old table to the new table and clear the extra
 * space if any. This does not copy the file pointers. Called with the files
 * spinlock held for write.
 */
static inline void copy_fd_bitmaps(struct fdtable *nfdt, struct fdtable *ofdt,
			    unsigned int copy_words)
{
	unsigned int nwords = fdt_words(nfdt);

	bitmap_copy_and_extend(nfdt->open_fds, ofdt->open_fds,
			copy_words * BITS_PER_LONG, nwords * BITS_PER_LONG);
	bitmap_copy_and_extend(nfdt->close_on_exec, ofdt->close_on_exec,
			copy_words * BITS_PER_LONG, nwords * BITS_PER_LONG);
	bitmap_copy_and_extend(nfdt->full_fds_bits, ofdt->full_fds_bits,
			copy_words, nwords);
}

/*
 * Copy all file descriptors from the old table to the new, expanded table and
 * clear the extra space. Called with the files spinlock held for write.
 */
static void copy_fdtable(struct fdtable *nfdt, struct fdtable *ofdt)
{
	size_t cpy, set;

	BUG_ON(nfdt->max_fds < ofdt->max_fds);

	cpy = ofdt->max_fds * sizeof(struct file *);
	set = (nfdt->max_fds - ofdt->max_fds) * sizeof(struct file *);
	memcpy(nfdt->fd, ofdt->fd, cpy);
	memset((char *)nfdt->fd + cpy, 0, set);

	copy_fd_bitmaps(nfdt, ofdt, fdt_words(ofdt));
}

/*
 * Note how the fdtable bitmap allocations very much have to be a multiple of
 * BITS_PER_LONG. This is not only because we walk those things in chunks of
 * 'unsigned long' in some places, but simply because that is how the Linux
 * kernel bitmaps are defined to work: they are not "bits in an array of bytes",
 * they are very much "bits in an array of unsigned long".
 */
static struct fdtable *alloc_fdtable(unsigned int slots_wanted)
{
	struct fdtable *fdt;
	unsigned int nr;
	void *data;

	/*
	 * Figure out how many fds we actually want to support in this fdtable.
	 * Allocation steps are keyed to the size of the fdarray, since it
	 * grows far faster than any of the other dynamic data. We try to fit
	 * the fdarray into comfortable page-tuned chunks: starting at 1024B
	 * and growing in powers of two from there on. Since we are called
	 * only with slots_wanted > BITS_PER_LONG (the embedded instance in
	 * files->fdtab already gives BITS_PER_LONG slots), the above boils
	 * down to:
	 * 1. use the smallest power of two large enough to give us that many
	 *    slots.
	 * 2. on 32bit skip 64 and 128 - the minimal capacity we want there is
	 *    256 slots (i.e. 1Kb fd array).
	 * 3. on 64bit don't skip anything, 1Kb fd array means 128 slots there
	 *    and we are never going to be asked for 64 or less.
	 */
	if (IS_ENABLED(CONFIG_32BIT) && slots_wanted < 256)
		nr = 256;
	else
		nr = roundup_pow_of_two(slots_wanted);
	/*
	 * Note that this can drive nr *below* what we had passed if sysctl_nr_open
	 * had been set lower between the check in expand_files() and here.
	 *
	 * We make sure that nr remains a multiple of BITS_PER_LONG - otherwise
	 * bitmaps handling below becomes unpleasant, to put it mildly...
	 */
	if (unlikely(nr > sysctl_nr_open)) {
		nr = round_down(sysctl_nr_open, BITS_PER_LONG);
		if (nr < slots_wanted)
			return ERR_PTR(-EMFILE);
	}

	fdt = kmalloc(sizeof(struct fdtable), GFP_KERNEL_ACCOUNT);
	if (!fdt)
		goto out;
	fdt->max_fds = nr;
	data = kvmalloc_array(nr, sizeof(struct file *), GFP_KERNEL_ACCOUNT);
	if (!data)
		goto out_fdt;
	fdt->fd = data;

	data = kvmalloc(max_t(size_t,
			      2 * nr / BITS_PER_BYTE + BITBIT_SIZE(nr), L1_CACHE_BYTES),
			      GFP_KERNEL_ACCOUNT);
	if (!data)
		goto out_arr;
	fdt->open_fds = data;
	data += nr / BITS_PER_BYTE;
	fdt->close_on_exec = data;
	data += nr / BITS_PER_BYTE;
	fdt->full_fds_bits = data;

	return fdt;

out_arr:
	kvfree(fdt->fd);
out_fdt:
	kfree(fdt);
out:
	return ERR_PTR(-ENOMEM);
}

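/*
 * Worked example (illustrative, 64-bit): slots_wanted = 200 rounds up to
 * nr = 256. The fd array is then 256 * sizeof(struct file *) = 2048 bytes,
 * and the combined bitmap allocation is 2 * 256 / 8 + BITBIT_SIZE(256) =
 * 32 + 32 + 8 bytes: 32 bytes each for open_fds and close_on_exec, plus one
 * long of full_fds_bits covering the four open_fds words.
 */
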
/*
 * Expand the file descriptor table.
 * This function will allocate a new fdtable and both fd array and fdset, of
 * the given size.
 * Return <0 error code on error; 0 on successful completion.
 * The files->file_lock should be held on entry, and will be held on exit.
 */
static int expand_fdtable(struct files_struct *files, unsigned int nr)
	__releases(files->file_lock)
	__acquires(files->file_lock)
{
	struct fdtable *new_fdt, *cur_fdt;

	spin_unlock(&files->file_lock);
	new_fdt = alloc_fdtable(nr + 1);

	/* make sure all fd_install() have seen resize_in_progress
	 * or have finished their rcu_read_lock_sched() section.
	 */
	if (atomic_read(&files->count) > 1)
		synchronize_rcu();

	spin_lock(&files->file_lock);
	if (IS_ERR(new_fdt))
		return PTR_ERR(new_fdt);
	cur_fdt = files_fdtable(files);
	BUG_ON(nr < cur_fdt->max_fds);
	copy_fdtable(new_fdt, cur_fdt);
	rcu_assign_pointer(files->fdt, new_fdt);
	if (cur_fdt != &files->fdtab)
		call_rcu(&cur_fdt->rcu, free_fdtable_rcu);
	/* coupled with smp_rmb() in fd_install() */
	smp_wmb();
	return 0;
}

/*
 * Expand files.
 * This function will expand the file structures, if the requested size exceeds
 * the current capacity and there is room for expansion.
 * Return <0 error code on error; 0 on success.
 * The files->file_lock should be held on entry, and will be held on exit.
 */
static int expand_files(struct files_struct *files, unsigned int nr)
	__releases(files->file_lock)
	__acquires(files->file_lock)
{
	struct fdtable *fdt;
	int error;

repeat:
	fdt = files_fdtable(files);

	/* Do we need to expand? */
	if (nr < fdt->max_fds)
		return 0;

	/* Can we expand? */
	if (nr >= sysctl_nr_open)
		return -EMFILE;

	if (unlikely(files->resize_in_progress)) {
		spin_unlock(&files->file_lock);
		wait_event(files->resize_wait, !files->resize_in_progress);
		spin_lock(&files->file_lock);
		goto repeat;
	}

	/* All good, so we try */
	files->resize_in_progress = true;
	error = expand_fdtable(files, nr);
	files->resize_in_progress = false;

	wake_up_all(&files->resize_wait);
	return error;
}

static inline void __set_close_on_exec(unsigned int fd, struct fdtable *fdt,
				       bool set)
{
	if (set) {
		__set_bit(fd, fdt->close_on_exec);
	} else {
		if (test_bit(fd, fdt->close_on_exec))
			__clear_bit(fd, fdt->close_on_exec);
	}
}

static inline void __set_open_fd(unsigned int fd, struct fdtable *fdt, bool set)
{
	__set_bit(fd, fdt->open_fds);
	__set_close_on_exec(fd, fdt, set);
	fd /= BITS_PER_LONG;
	if (!~fdt->open_fds[fd])
		__set_bit(fd, fdt->full_fds_bits);
}

static inline void __clear_open_fd(unsigned int fd, struct fdtable *fdt)
{
	__clear_bit(fd, fdt->open_fds);
	fd /= BITS_PER_LONG;
	if (test_bit(fd, fdt->full_fds_bits))
		__clear_bit(fd, fdt->full_fds_bits);
}

static inline bool fd_is_open(unsigned int fd, const struct fdtable *fdt)
{
	return test_bit(fd, fdt->open_fds);
}

/*
 * Note that a sane fdtable size always has to be a multiple of
 * BITS_PER_LONG, since we have bitmaps that are sized by this.
 *
 * punch_hole is optional - when close_range() is asked to unshare
 * and close, we don't need to copy descriptors in that range, so
 * a smaller cloned descriptor table might suffice if the last
 * currently opened descriptor falls into that range.
 */
static unsigned int sane_fdtable_size(struct fdtable *fdt, struct fd_range *punch_hole)
{
	unsigned int last = find_last_bit(fdt->open_fds, fdt->max_fds);

	if (last == fdt->max_fds)
		return NR_OPEN_DEFAULT;
	if (punch_hole && punch_hole->to >= last && punch_hole->from <= last) {
		last = find_last_bit(fdt->open_fds, punch_hole->from);
		if (last == punch_hole->from)
			return NR_OPEN_DEFAULT;
	}
	return ALIGN(last + 1, BITS_PER_LONG);
}

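/*
 * Worked example (illustrative, BITS_PER_LONG == 64): with the highest open
 * descriptor at 300, a plain clone needs ALIGN(301, 64) = 320 slots. If
 * close_range(64, ~0U, CLOSE_RANGE_UNSHARE) passes punch_hole = {64, ~0U},
 * descriptor 300 falls inside the hole, so the size is recomputed from the
 * last open descriptor below 64 - and may shrink all the way to
 * NR_OPEN_DEFAULT if nothing below the hole is open.
 */
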
/*
 * Allocate a new descriptor table and copy contents from the passed in
 * instance. Returns a pointer to cloned table on success, ERR_PTR()
 * on failure. For 'punch_hole' see sane_fdtable_size().
 */
struct files_struct *dup_fd(struct files_struct *oldf, struct fd_range *punch_hole)
{
	struct files_struct *newf;
	struct file **old_fds, **new_fds;
	unsigned int open_files, i;
	struct fdtable *old_fdt, *new_fdt;

	newf = kmem_cache_alloc(files_cachep, GFP_KERNEL);
	if (!newf)
		return ERR_PTR(-ENOMEM);

	atomic_set(&newf->count, 1);

	spin_lock_init(&newf->file_lock);
	newf->resize_in_progress = false;
	init_waitqueue_head(&newf->resize_wait);
	newf->next_fd = 0;
	new_fdt = &newf->fdtab;
	new_fdt->max_fds = NR_OPEN_DEFAULT;
	new_fdt->close_on_exec = newf->close_on_exec_init;
	new_fdt->open_fds = newf->open_fds_init;
	new_fdt->full_fds_bits = newf->full_fds_bits_init;
	new_fdt->fd = &newf->fd_array[0];

	spin_lock(&oldf->file_lock);
	old_fdt = files_fdtable(oldf);
	open_files = sane_fdtable_size(old_fdt, punch_hole);

	/*
	 * Check whether we need to allocate a larger fd array and fd set.
	 */
	while (unlikely(open_files > new_fdt->max_fds)) {
		spin_unlock(&oldf->file_lock);

		if (new_fdt != &newf->fdtab)
			__free_fdtable(new_fdt);

		new_fdt = alloc_fdtable(open_files);
		if (IS_ERR(new_fdt)) {
			kmem_cache_free(files_cachep, newf);
			return ERR_CAST(new_fdt);
		}

		/*
		 * Reacquire the oldf lock and a pointer to its fd table - it
		 * may have grown a new, bigger fd table in the meantime, and
		 * we need the latest pointer.
		 */
		spin_lock(&oldf->file_lock);
		old_fdt = files_fdtable(oldf);
		open_files = sane_fdtable_size(old_fdt, punch_hole);
	}

	copy_fd_bitmaps(new_fdt, old_fdt, open_files / BITS_PER_LONG);

	old_fds = old_fdt->fd;
	new_fds = new_fdt->fd;

	for (i = open_files; i != 0; i--) {
		struct file *f = *old_fds++;
		if (f) {
			get_file(f);
		} else {
			/*
			 * The fd may be claimed in the fd bitmap but not yet
			 * instantiated in the files array if a sibling thread
			 * is partway through open(). So make sure that this
			 * fd is available to the new process.
			 */
			__clear_open_fd(open_files - i, new_fdt);
		}
		rcu_assign_pointer(*new_fds++, f);
	}
	spin_unlock(&oldf->file_lock);

	/* clear the remainder */
	memset(new_fds, 0, (new_fdt->max_fds - open_files) * sizeof(struct file *));

	rcu_assign_pointer(newf->fdt, new_fdt);

	return newf;
}

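/*
 * Example (illustrative sketch): the fork() path duplicates the whole table
 * with no hole, while close_range(..., CLOSE_RANGE_UNSHARE) passes a
 * punch_hole so descriptors it is about to close are never copied:
 *
 *	newf = dup_fd(oldf, NULL);
 *	if (IS_ERR(newf))
 *		return PTR_ERR(newf);
 */
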
static struct fdtable *close_files(struct files_struct * files)
{
	/*
	 * It is safe to dereference the fd table without RCU or
	 * ->file_lock because this is the last reference to the
	 * files structure.
	 */
	struct fdtable *fdt = rcu_dereference_raw(files->fdt);
	unsigned int i, j = 0;

	for (;;) {
		unsigned long set;
		i = j * BITS_PER_LONG;
		if (i >= fdt->max_fds)
			break;
		set = fdt->open_fds[j++];
		while (set) {
			if (set & 1) {
				struct file *file = fdt->fd[i];
				if (file) {
					filp_close(file, files);
					cond_resched();
				}
			}
			i++;
			set >>= 1;
		}
	}

	return fdt;
}

void put_files_struct(struct files_struct *files)
{
	if (atomic_dec_and_test(&files->count)) {
		struct fdtable *fdt = close_files(files);

		/* free the arrays if they are not embedded */
		if (fdt != &files->fdtab)
			__free_fdtable(fdt);
		kmem_cache_free(files_cachep, files);
	}
}

void exit_files(struct task_struct *tsk)
{
	struct files_struct * files = tsk->files;

	if (files) {
		task_lock(tsk);
		tsk->files = NULL;
		task_unlock(tsk);
		put_files_struct(files);
	}
}

struct files_struct init_files = {
	.count		= ATOMIC_INIT(1),
	.fdt		= &init_files.fdtab,
	.fdtab		= {
		.max_fds	= NR_OPEN_DEFAULT,
		.fd		= &init_files.fd_array[0],
		.close_on_exec	= init_files.close_on_exec_init,
		.open_fds	= init_files.open_fds_init,
		.full_fds_bits	= init_files.full_fds_bits_init,
	},
	.file_lock	= __SPIN_LOCK_UNLOCKED(init_files.file_lock),
	.resize_wait	= __WAIT_QUEUE_HEAD_INITIALIZER(init_files.resize_wait),
};

static unsigned int find_next_fd(struct fdtable *fdt, unsigned int start)
{
	unsigned int maxfd = fdt->max_fds; /* always multiple of BITS_PER_LONG */
	unsigned int maxbit = maxfd / BITS_PER_LONG;
	unsigned int bitbit = start / BITS_PER_LONG;
	unsigned int bit;

	/*
	 * Try to avoid looking at the second level bitmap
	 */
	bit = find_next_zero_bit(&fdt->open_fds[bitbit], BITS_PER_LONG,
				 start & (BITS_PER_LONG - 1));
	if (bit < BITS_PER_LONG)
		return bit + bitbit * BITS_PER_LONG;

	bitbit = find_next_zero_bit(fdt->full_fds_bits, maxbit, bitbit) * BITS_PER_LONG;
	if (bitbit >= maxfd)
		return maxfd;
	if (bitbit > start)
		start = bitbit;
	return find_next_zero_bit(fdt->open_fds, maxfd, start);
}

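/*
 * Worked example (illustrative, BITS_PER_LONG == 64): a search from
 * start = 70 first scans open_fds[1] beginning at bit 6. Only if that word
 * is completely full does the second-level scan of full_fds_bits kick in,
 * skipping every full word in one step before the final
 * find_next_zero_bit() over open_fds resumes from the first word with room.
 */
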
/*
 * allocate a file descriptor, mark it busy.
 */
static int alloc_fd(unsigned start, unsigned end, unsigned flags)
{
	struct files_struct *files = current->files;
	unsigned int fd;
	int error;
	struct fdtable *fdt;

	spin_lock(&files->file_lock);
repeat:
	fdt = files_fdtable(files);
	fd = start;
	if (fd < files->next_fd)
		fd = files->next_fd;

	if (likely(fd < fdt->max_fds))
		fd = find_next_fd(fdt, fd);

	/*
	 * N.B. For clone tasks sharing a files structure, this test
	 * will limit the total number of files that can be opened.
	 */
	error = -EMFILE;
	if (unlikely(fd >= end))
		goto out;

	if (unlikely(fd >= fdt->max_fds)) {
		error = expand_files(files, fd);
		if (error < 0)
			goto out;
		goto repeat;
	}

	if (start <= files->next_fd)
		files->next_fd = fd + 1;

	__set_open_fd(fd, fdt, flags & O_CLOEXEC);
	error = fd;

out:
	spin_unlock(&files->file_lock);
	return error;
}

int __get_unused_fd_flags(unsigned flags, unsigned long nofile)
{
	return alloc_fd(0, nofile, flags);
}

int get_unused_fd_flags(unsigned flags)
{
	return __get_unused_fd_flags(flags, rlimit(RLIMIT_NOFILE));
}
EXPORT_SYMBOL(get_unused_fd_flags);

static void __put_unused_fd(struct files_struct *files, unsigned int fd)
{
	struct fdtable *fdt = files_fdtable(files);
	__clear_open_fd(fd, fdt);
	if (fd < files->next_fd)
		files->next_fd = fd;
}

void put_unused_fd(unsigned int fd)
{
	struct files_struct *files = current->files;
	spin_lock(&files->file_lock);
	__put_unused_fd(files, fd);
	spin_unlock(&files->file_lock);
}

EXPORT_SYMBOL(put_unused_fd);

/*
 * Install a file pointer in the fd array.
 *
 * The VFS is full of places where we drop the files lock between
 * setting the open_fds bitmap and installing the file in the file
 * array. At any such point, we are vulnerable to a dup2() race
 * installing a file in the array before us. We need to detect this and
 * fput() the struct file we are about to overwrite in this case.
 *
 * It should never happen - if we allow dup2() to do it, _really_ bad things
 * will follow.
 *
 * This consumes the "file" refcount, so callers should treat it
 * as if they had called fput(file).
 */
void fd_install(unsigned int fd, struct file *file)
{
	struct files_struct *files = current->files;
	struct fdtable *fdt;

	if (WARN_ON_ONCE(unlikely(file->f_mode & FMODE_BACKING)))
		return;

	rcu_read_lock_sched();

	if (unlikely(files->resize_in_progress)) {
		rcu_read_unlock_sched();
		spin_lock(&files->file_lock);
		fdt = files_fdtable(files);
		WARN_ON(fdt->fd[fd] != NULL);
		rcu_assign_pointer(fdt->fd[fd], file);
		spin_unlock(&files->file_lock);
		return;
	}
	/* coupled with smp_wmb() in expand_fdtable() */
	smp_rmb();
	fdt = rcu_dereference_sched(files->fdt);
	BUG_ON(fdt->fd[fd] != NULL);
	rcu_assign_pointer(fdt->fd[fd], file);
	rcu_read_unlock_sched();
}

EXPORT_SYMBOL(fd_install);

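/*
 * Example (illustrative sketch): the canonical open()-style pattern reserves
 * a descriptor, creates the file, and only then publishes it. fd_install()
 * consumes the file reference, so no fput() follows on success;
 * "example_fops" is a placeholder for whatever file_operations the caller
 * implements:
 *
 *	fd = get_unused_fd_flags(O_CLOEXEC);
 *	if (fd < 0)
 *		return fd;
 *	file = anon_inode_getfile("[example]", &example_fops, NULL, O_RDWR);
 *	if (IS_ERR(file)) {
 *		put_unused_fd(fd);
 *		return PTR_ERR(file);
 *	}
 *	fd_install(fd, file);
 *	return fd;
 */
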
/**
 * file_close_fd_locked - return file associated with fd
 * @files: file struct to retrieve file from
 * @fd: file descriptor to retrieve file for
 *
 * Doesn't take a separate reference count.
 *
 * Context: files_lock must be held.
 *
 * Returns: The file associated with @fd (NULL if @fd is not open)
 */
struct file *file_close_fd_locked(struct files_struct *files, unsigned fd)
{
	struct fdtable *fdt = files_fdtable(files);
	struct file *file;

	lockdep_assert_held(&files->file_lock);

	if (fd >= fdt->max_fds)
		return NULL;

	fd = array_index_nospec(fd, fdt->max_fds);
	file = fdt->fd[fd];
	if (file) {
		rcu_assign_pointer(fdt->fd[fd], NULL);
		__put_unused_fd(files, fd);
	}
	return file;
}

int close_fd(unsigned fd)
{
	struct files_struct *files = current->files;
	struct file *file;

	spin_lock(&files->file_lock);
	file = file_close_fd_locked(files, fd);
	spin_unlock(&files->file_lock);
	if (!file)
		return -EBADF;

	return filp_close(file, files);
}
EXPORT_SYMBOL(close_fd);

/**
 * last_fd - return last valid index into fd table
 * @fdt: File descriptor table.
 *
 * Context: Either rcu read lock or files_lock must be held.
 *
 * Returns: Last valid index into fdtable.
 */
static inline unsigned last_fd(struct fdtable *fdt)
{
	return fdt->max_fds - 1;
}

static inline void __range_cloexec(struct files_struct *cur_fds,
				   unsigned int fd, unsigned int max_fd)
{
	struct fdtable *fdt;

	/* make sure we're using the correct maximum value */
	spin_lock(&cur_fds->file_lock);
	fdt = files_fdtable(cur_fds);
	max_fd = min(last_fd(fdt), max_fd);
	if (fd <= max_fd)
		bitmap_set(fdt->close_on_exec, fd, max_fd - fd + 1);
	spin_unlock(&cur_fds->file_lock);
}

static inline void __range_close(struct files_struct *files, unsigned int fd,
				 unsigned int max_fd)
{
	struct file *file;
	unsigned n;

	spin_lock(&files->file_lock);
	n = last_fd(files_fdtable(files));
	max_fd = min(max_fd, n);

	for (; fd <= max_fd; fd++) {
		file = file_close_fd_locked(files, fd);
		if (file) {
			spin_unlock(&files->file_lock);
			filp_close(file, files);
			cond_resched();
			spin_lock(&files->file_lock);
		} else if (need_resched()) {
			spin_unlock(&files->file_lock);
			cond_resched();
			spin_lock(&files->file_lock);
		}
	}
	spin_unlock(&files->file_lock);
}

/**
 * sys_close_range() - Close all file descriptors in a given range.
 *
 * @fd:     starting file descriptor to close
 * @max_fd: last file descriptor to close
 * @flags:  CLOSE_RANGE flags.
 *
 * This closes a range of file descriptors. All file descriptors
 * from @fd up to and including @max_fd are closed.
 * Errors from closing a given file descriptor are currently ignored.
 */
SYSCALL_DEFINE3(close_range, unsigned int, fd, unsigned int, max_fd,
		unsigned int, flags)
{
	struct task_struct *me = current;
	struct files_struct *cur_fds = me->files, *fds = NULL;

	if (flags & ~(CLOSE_RANGE_UNSHARE | CLOSE_RANGE_CLOEXEC))
		return -EINVAL;

	if (fd > max_fd)
		return -EINVAL;

	if ((flags & CLOSE_RANGE_UNSHARE) && atomic_read(&cur_fds->count) > 1) {
		struct fd_range range = {fd, max_fd}, *punch_hole = &range;

		/*
		 * If the caller requested all fds to be made cloexec we always
		 * copy all of the file descriptors since they still want to
		 * use them.
		 */
		if (flags & CLOSE_RANGE_CLOEXEC)
			punch_hole = NULL;

		fds = dup_fd(cur_fds, punch_hole);
		if (IS_ERR(fds))
			return PTR_ERR(fds);
		/*
		 * We used to share our file descriptor table, and have now
		 * created a private one, make sure we're using it below.
		 */
		swap(cur_fds, fds);
	}

	if (flags & CLOSE_RANGE_CLOEXEC)
		__range_cloexec(cur_fds, fd, max_fd);
	else
		__range_close(cur_fds, fd, max_fd);

	if (fds) {
		/*
		 * We're done closing the files we were supposed to. Time to
		 * install the new file descriptor table and drop the old one.
		 */
		task_lock(me);
		me->files = cur_fds;
		task_unlock(me);
		put_files_struct(fds);
	}

	return 0;
}

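/*
 * Example (illustrative, userspace view): a post-fork sandboxing step that
 * closes everything above stderr and marks stdio close-on-exec:
 *
 *	close_range(3, ~0U, 0);
 *	close_range(0, 2, CLOSE_RANGE_CLOEXEC);
 */
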
/**
 * file_close_fd - return file associated with fd
 * @fd: file descriptor to retrieve file for
 *
 * Doesn't take a separate reference count.
 *
 * Returns: The file associated with @fd (NULL if @fd is not open)
 */
struct file *file_close_fd(unsigned int fd)
{
	struct files_struct *files = current->files;
	struct file *file;

	spin_lock(&files->file_lock);
	file = file_close_fd_locked(files, fd);
	spin_unlock(&files->file_lock);

	return file;
}

void do_close_on_exec(struct files_struct *files)
{
	unsigned i;
	struct fdtable *fdt;

	/* exec unshares first */
	spin_lock(&files->file_lock);
	for (i = 0; ; i++) {
		unsigned long set;
		unsigned fd = i * BITS_PER_LONG;
		fdt = files_fdtable(files);
		if (fd >= fdt->max_fds)
			break;
		set = fdt->close_on_exec[i];
		if (!set)
			continue;
		fdt->close_on_exec[i] = 0;
		for ( ; set ; fd++, set >>= 1) {
			struct file *file;
			if (!(set & 1))
				continue;
			file = fdt->fd[fd];
			if (!file)
				continue;
			rcu_assign_pointer(fdt->fd[fd], NULL);
			__put_unused_fd(files, fd);
			spin_unlock(&files->file_lock);
			filp_close(file, files);
			cond_resched();
			spin_lock(&files->file_lock);
		}
	}
	spin_unlock(&files->file_lock);
}

static struct file *__get_file_rcu(struct file __rcu **f)
{
	struct file __rcu *file;
	struct file __rcu *file_reloaded;
	struct file __rcu *file_reloaded_cmp;

	file = rcu_dereference_raw(*f);
	if (!file)
		return NULL;

	if (unlikely(!file_ref_get(&file->f_ref)))
		return ERR_PTR(-EAGAIN);

	file_reloaded = rcu_dereference_raw(*f);

	/*
	 * Ensure that all accesses have a dependency on the load from
	 * rcu_dereference_raw() above so we get correct ordering
	 * between reuse/allocation and the pointer check below.
	 */
	file_reloaded_cmp = file_reloaded;
	OPTIMIZER_HIDE_VAR(file_reloaded_cmp);

	/*
	 * file_ref_get() above provided a full memory barrier when we
	 * acquired a reference.
	 *
	 * This is paired with the write barrier from assigning to the
	 * __rcu protected file pointer so that if that pointer still
	 * matches the current file, we know we have successfully
	 * acquired a reference to the right file.
	 *
	 * If the pointers don't match the file has been reallocated by
	 * SLAB_TYPESAFE_BY_RCU.
	 */
	if (file == file_reloaded_cmp)
		return file_reloaded;

	fput(file);
	return ERR_PTR(-EAGAIN);
}

/**
 * get_file_rcu - try to get a reference to a file under rcu
 * @f: the file to get a reference on
 *
 * This function tries to get a reference on @f carefully verifying that
 * @f hasn't been reused.
 *
 * This function should rarely have to be used and only by users who
 * understand the implications of SLAB_TYPESAFE_BY_RCU. Try to avoid it.
 *
 * Return: Returns @f with the reference count increased or NULL.
 */
struct file *get_file_rcu(struct file __rcu **f)
{
	for (;;) {
		struct file __rcu *file;

		file = __get_file_rcu(f);
		if (!IS_ERR(file))
			return file;
	}
}
EXPORT_SYMBOL_GPL(get_file_rcu);

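/*
 * Example (illustrative sketch): a lockless lookup of a file published in an
 * RCU-protected slot, where "slot" is a hypothetical struct file __rcu *
 * location; the caller must be in an RCU read-side section for the lookup:
 *
 *	rcu_read_lock();
 *	file = get_file_rcu(&slot);
 *	rcu_read_unlock();
 *	if (file) {
 *		// ... use file, we hold a reference now ...
 *		fput(file);
 *	}
 */
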
/**
 * get_file_active - try to get a reference to a file
 * @f: the file to get a reference on
 *
 * In contrast to get_file_rcu() the pointer itself isn't part of the
 * reference counting.
 *
 * This function should rarely have to be used and only by users who
 * understand the implications of SLAB_TYPESAFE_BY_RCU. Try to avoid it.
 *
 * Return: Returns @f with the reference count increased or NULL.
 */
struct file *get_file_active(struct file **f)
{
	struct file __rcu *file;

	rcu_read_lock();
	file = __get_file_rcu(f);
	rcu_read_unlock();
	if (IS_ERR(file))
		file = NULL;
	return file;
}
EXPORT_SYMBOL_GPL(get_file_active);

static inline struct file *__fget_files_rcu(struct files_struct *files,
	unsigned int fd, fmode_t mask)
{
	for (;;) {
		struct file *file;
		struct fdtable *fdt = rcu_dereference_raw(files->fdt);
		struct file __rcu **fdentry;
		unsigned long nospec_mask;

		/* Mask is a 0 for invalid fd's, ~0 for valid ones */
		nospec_mask = array_index_mask_nospec(fd, fdt->max_fds);

		/*
		 * fdentry points to the 'fd' offset, or fdt->fd[0].
		 * Loading from fdt->fd[0] is always safe, because the
		 * array always exists.
		 */
		fdentry = fdt->fd + (fd & nospec_mask);

		/* Do the load, then mask any invalid result */
		file = rcu_dereference_raw(*fdentry);
		file = (void *)(nospec_mask & (unsigned long)file);
		if (unlikely(!file))
			return NULL;

		/*
		 * Ok, we have a file pointer that was valid at
		 * some point, but it might have become stale since.
		 *
		 * We need to confirm it by incrementing the refcount
		 * and then check the lookup again.
		 *
		 * file_ref_get() gives us a full memory barrier. We
		 * only really need an 'acquire' one to protect the
		 * loads below, but we don't have that.
		 */
		if (unlikely(!file_ref_get(&file->f_ref)))
			continue;

		/*
		 * Such a race can take two forms:
		 *
		 *  (a) the file ref already went down to zero and the
		 *      file hasn't been reused yet or the file count
		 *      isn't zero but the file has already been reused.
		 *
		 *  (b) the file table entry has changed under us.
		 *      Note that we don't need to re-check the 'fdt->fd'
		 *      pointer having changed, because it always goes
		 *      hand-in-hand with 'fdt'.
		 *
		 * If so, we need to put our ref and try again.
		 */
		if (unlikely(file != rcu_dereference_raw(*fdentry)) ||
		    unlikely(rcu_dereference_raw(files->fdt) != fdt)) {
			fput(file);
			continue;
		}

		/*
		 * This isn't the file we're looking for or we're not
		 * allowed to get a reference to it.
		 */
		if (unlikely(file->f_mode & mask)) {
			fput(file);
			return NULL;
		}

		/*
		 * Ok, we have a ref to the file, and checked that it
		 * still exists.
		 */
		return file;
	}
}

static struct file *__fget_files(struct files_struct *files, unsigned int fd,
				 fmode_t mask)
{
	struct file *file;

	rcu_read_lock();
	file = __fget_files_rcu(files, fd, mask);
	rcu_read_unlock();

	return file;
}

static inline struct file *__fget(unsigned int fd, fmode_t mask)
{
	return __fget_files(current->files, fd, mask);
}

struct file *fget(unsigned int fd)
{
	return __fget(fd, FMODE_PATH);
}
EXPORT_SYMBOL(fget);

struct file *fget_raw(unsigned int fd)
{
	return __fget(fd, 0);
}
EXPORT_SYMBOL(fget_raw);

struct file *fget_task(struct task_struct *task, unsigned int fd)
{
	struct file *file = NULL;

	task_lock(task);
	if (task->files)
		file = __fget_files(task->files, fd, 0);
	task_unlock(task);

	return file;
}

*fget_task_next(struct task_struct
*task
, unsigned int *ret_fd
)
1086 /* Must be called with rcu_read_lock held */
1087 struct files_struct
*files
;
1088 unsigned int fd
= *ret_fd
;
1089 struct file
*file
= NULL
;
1092 files
= task
->files
;
1095 for (; fd
< files_fdtable(files
)->max_fds
; fd
++) {
1096 file
= __fget_files_rcu(files
, fd
, 0);
1106 EXPORT_SYMBOL(fget_task_next
);
/*
 * Lightweight file lookup - no refcnt increment if fd table isn't shared.
 *
 * You can use this instead of fget if you satisfy all of the following
 * conditions:
 * 1) You must call fput_light before exiting the syscall and returning control
 *    to userspace (i.e. you cannot remember the returned struct file * after
 *    returning to userspace).
 * 2) You must not call filp_close on the returned struct file * in between
 *    calls to fget_light and fput_light.
 * 3) You must not clone the current task in between the calls to fget_light
 *    and fput_light.
 *
 * The fput_needed flag returned by fget_light should be passed to the
 * corresponding fput_light.
 *
 * (As an exception to rule 2, you can call filp_close between fget_light and
 * fput_light provided that you capture a real refcount with get_file before
 * the call to filp_close, and ensure that this real refcount is fput *after*
 * the fput_light call.)
 *
 * See also the documentation in rust/kernel/file.rs.
 */
static inline struct fd __fget_light(unsigned int fd, fmode_t mask)
{
	struct files_struct *files = current->files;
	struct file *file;

	/*
	 * If another thread is concurrently calling close_fd() followed
	 * by put_files_struct(), we must not observe the old table
	 * entry combined with the new refcount - otherwise we could
	 * return a file that is concurrently being freed.
	 *
	 * atomic_read_acquire() pairs with atomic_dec_and_test() in
	 * put_files_struct().
	 */
	if (likely(atomic_read_acquire(&files->count) == 1)) {
		file = files_lookup_fd_raw(files, fd);
		if (!file || unlikely(file->f_mode & mask))
			return EMPTY_FD;
		return BORROWED_FD(file);
	} else {
		file = __fget_files(files, fd, mask);
		if (!file)
			return EMPTY_FD;
		return CLONED_FD(file);
	}
}

struct fd fdget(unsigned int fd)
{
	return __fget_light(fd, FMODE_PATH);
}
EXPORT_SYMBOL(fdget);

struct fd fdget_raw(unsigned int fd)
{
	return __fget_light(fd, 0);
}

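/*
 * Example (illustrative sketch): the usual syscall-local borrow. fdget()
 * avoids a refcount bump while the fd table is unshared, and fdput() undoes
 * exactly what fdget() did; do_something() stands in for the per-syscall
 * work:
 *
 *	struct fd f = fdget(fd);
 *
 *	if (fd_empty(f))
 *		return -EBADF;
 *	ret = do_something(fd_file(f));
 *	fdput(f);
 *	return ret;
 */
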
/*
 * Try to avoid f_pos locking. We only need it if the
 * file is marked for FMODE_ATOMIC_POS, and it can be
 * accessed multiple ways.
 *
 * Always do it for directories, because pidfd_getfd()
 * can make a file accessible even if it otherwise would
 * not be, and for directories this is a correctness
 * issue, not a "POSIX requirement".
 */
static inline bool file_needs_f_pos_lock(struct file *file)
{
	return (file->f_mode & FMODE_ATOMIC_POS) &&
		(file_count(file) > 1 || file->f_op->iterate_shared);
}

struct fd fdget_pos(unsigned int fd)
{
	struct fd f = fdget(fd);
	struct file *file = fd_file(f);

	if (file && file_needs_f_pos_lock(file)) {
		f.word |= FDPUT_POS_UNLOCK;
		mutex_lock(&file->f_pos_lock);
	}
	return f;
}

void __f_unlock_pos(struct file *f)
{
	mutex_unlock(&f->f_pos_lock);
}

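/*
 * Example (illustrative sketch): read()/write()-style paths use the
 * position-protected variant so concurrent users of a shared struct file see
 * a consistent f_pos:
 *
 *	struct fd f = fdget_pos(fd);
 *
 *	if (fd_empty(f))
 *		return -EBADF;
 *	ret = vfs_read(fd_file(f), buf, count, &fd_file(f)->f_pos);
 *	fdput_pos(f);	// drops f_pos_lock if fdget_pos() took it
 */
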
/*
 * We only lock f_pos if we have threads or if the file might be
 * shared with another process. In both cases we'll have an elevated
 * file count (done either by fdget() or by fork()).
 */

void set_close_on_exec(unsigned int fd, int flag)
{
	struct files_struct *files = current->files;
	spin_lock(&files->file_lock);
	__set_close_on_exec(fd, files_fdtable(files), flag);
	spin_unlock(&files->file_lock);
}

bool get_close_on_exec(unsigned int fd)
{
	bool res;
	rcu_read_lock();
	res = close_on_exec(fd, current->files);
	rcu_read_unlock();
	return res;
}

static int do_dup2(struct files_struct *files,
	struct file *file, unsigned fd, unsigned flags)
__releases(&files->file_lock)
{
	struct file *tofree;
	struct fdtable *fdt;

	/*
	 * We need to detect attempts to do dup2() over allocated but still
	 * not finished descriptor. NB: OpenBSD avoids that at the price of
	 * extra work in their equivalent of fget() - they insert struct
	 * file immediately after grabbing descriptor, mark it larval if
	 * more work (e.g. actual opening) is needed and make sure that
	 * fget() treats larval files as absent. Potentially interesting,
	 * but while extra work in fget() is trivial, locking implications
	 * and amount of surgery on open()-related paths in VFS are not.
	 * FreeBSD fails with -EBADF in the same situation, NetBSD "solution"
	 * deadlocks in rather amusing ways, AFAICS. All of that is out of
	 * scope of POSIX or SUS, since neither considers shared descriptor
	 * tables and this condition does not arise without those.
	 */
	fdt = files_fdtable(files);
	fd = array_index_nospec(fd, fdt->max_fds);
	tofree = fdt->fd[fd];
	if (!tofree && fd_is_open(fd, fdt))
		goto Ebusy;
	get_file(file);
	rcu_assign_pointer(fdt->fd[fd], file);
	__set_open_fd(fd, fdt, flags & O_CLOEXEC);
	spin_unlock(&files->file_lock);

	if (tofree)
		filp_close(tofree, files);

	return fd;

Ebusy:
	spin_unlock(&files->file_lock);
	return -EBUSY;
}

int replace_fd(unsigned fd, struct file *file, unsigned flags)
{
	int err;
	struct files_struct *files = current->files;

	if (!file)
		return close_fd(fd);

	if (fd >= rlimit(RLIMIT_NOFILE))
		return -EBADF;

	spin_lock(&files->file_lock);
	err = expand_files(files, fd);
	if (unlikely(err < 0))
		goto out_unlock;
	return do_dup2(files, file, fd, flags);

out_unlock:
	spin_unlock(&files->file_lock);
	return err;
}

/**
 * receive_fd() - Install received file into file descriptor table
 * @file: struct file that was received from another process
 * @ufd: __user pointer to write new fd number to
 * @o_flags: the O_* flags to apply to the new fd entry
 *
 * Installs a received file into the file descriptor table, with appropriate
 * checks and count updates. Optionally writes the fd number to userspace, if
 * @ufd is non-NULL.
 *
 * This helper handles its own reference counting of the incoming
 * struct file.
 *
 * Returns the newly installed fd or a negative errno on error.
 */
int receive_fd(struct file *file, int __user *ufd, unsigned int o_flags)
{
	int new_fd;
	int error;

	error = security_file_receive(file);
	if (error)
		return error;

	new_fd = get_unused_fd_flags(o_flags);
	if (new_fd < 0)
		return new_fd;

	if (ufd) {
		error = put_user(new_fd, ufd);
		if (error) {
			put_unused_fd(new_fd);
			return error;
		}
	}

	fd_install(new_fd, get_file(file));
	__receive_sock(file);
	return new_fd;
}
EXPORT_SYMBOL_GPL(receive_fd);

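/*
 * Example (illustrative sketch): SCM_RIGHTS-style reception, installing a
 * sender's struct file into the receiver's table and reporting the new fd
 * number back to userspace; "uarg" is a placeholder for a __user argument
 * structure:
 *
 *	err = receive_fd(file, &uarg->fd, O_CLOEXEC);
 *	if (err < 0)
 *		return err;
 */
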
int receive_fd_replace(int new_fd, struct file *file, unsigned int o_flags)
{
	int error;

	error = security_file_receive(file);
	if (error)
		return error;
	error = replace_fd(new_fd, file, o_flags);
	if (error)
		return error;
	__receive_sock(file);
	return new_fd;
}

static int ksys_dup3(unsigned int oldfd, unsigned int newfd, int flags)
{
	int err = -EBADF;
	struct file *file;
	struct files_struct *files = current->files;

	if ((flags & ~O_CLOEXEC) != 0)
		return -EINVAL;

	if (unlikely(oldfd == newfd))
		return -EINVAL;

	if (newfd >= rlimit(RLIMIT_NOFILE))
		return -EBADF;

	spin_lock(&files->file_lock);
	err = expand_files(files, newfd);
	file = files_lookup_fd_locked(files, oldfd);
	if (unlikely(!file))
		goto Ebadf;
	if (unlikely(err < 0)) {
		if (err == -EMFILE)
			goto Ebadf;
		goto out_unlock;
	}
	return do_dup2(files, file, newfd, flags);

Ebadf:
	err = -EBADF;
out_unlock:
	spin_unlock(&files->file_lock);
	return err;
}

SYSCALL_DEFINE3(dup3, unsigned int, oldfd, unsigned int, newfd, int, flags)
{
	return ksys_dup3(oldfd, newfd, flags);
}

SYSCALL_DEFINE2(dup2, unsigned int, oldfd, unsigned int, newfd)
{
	if (unlikely(newfd == oldfd)) { /* corner case */
		struct files_struct *files = current->files;
		struct file *f;
		int retval = oldfd;

		rcu_read_lock();
		f = __fget_files_rcu(files, oldfd, 0);
		if (!f)
			retval = -EBADF;
		rcu_read_unlock();
		if (f)
			fput(f);
		return retval;
	}
	return ksys_dup3(oldfd, newfd, 0);
}

SYSCALL_DEFINE1(dup, unsigned int, fildes)
{
	int ret = -EBADF;
	struct file *file = fget_raw(fildes);

	if (file) {
		ret = get_unused_fd_flags(0);
		if (ret >= 0)
			fd_install(ret, file);
		else
			fput(file);
	}
	return ret;
}

, struct file
*file
, unsigned flags
)
1418 unsigned long nofile
= rlimit(RLIMIT_NOFILE
);
1422 err
= alloc_fd(from
, nofile
, flags
);
1425 fd_install(err
, file
);
int iterate_fd(struct files_struct *files, unsigned n,
		int (*f)(const void *, struct file *, unsigned),
		const void *p)
{
	struct fdtable *fdt;
	int res = 0;

	if (!files)
		return 0;
	spin_lock(&files->file_lock);
	for (fdt = files_fdtable(files); n < fdt->max_fds; n++) {
		struct file *file;
		file = rcu_dereference_check_fdtable(files, fdt->fd[n]);
		if (!file)
			continue;
		res = f(p, file, n);
		if (res)
			break;
	}
	spin_unlock(&files->file_lock);
	return res;
}
EXPORT_SYMBOL(iterate_fd);
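
/*
 * Example (illustrative sketch): counting a task's open descriptors with
 * iterate_fd(), using a callback that never stops the walk early:
 *
 *	static int count_one(const void *p, struct file *file, unsigned n)
 *	{
 *		(*(unsigned int *)p)++;
 *		return 0;	// non-zero would terminate the iteration
 *	}
 *
 *	unsigned int count = 0;
 *	iterate_fd(files, 0, count_one, &count);
 */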