/*
 * Copyright (C) 1998-1999, Stephen Tweedie and Bill Hawes
 *
 * Manage the dynamic fd arrays in the process files_struct.
 */

#include <linux/syscalls.h>
#include <linux/export.h>
#include <linux/mmzone.h>
#include <linux/time.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/file.h>
#include <linux/fdtable.h>
#include <linux/bitops.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/rcupdate.h>
#include <linux/workqueue.h>

int sysctl_nr_open __read_mostly = 1024*1024;
int sysctl_nr_open_min = BITS_PER_LONG;
/* our min() is unusable in constant expressions ;-/ */
#define __const_min(x, y) ((x) < (y) ? (x) : (y))
int sysctl_nr_open_max = __const_min(INT_MAX, ~(size_t)0/sizeof(void *)) &
			 -BITS_PER_LONG;
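
/*
 * Worked example (illustrative): on 64-bit, ~(size_t)0/sizeof(void *) is
 * about 2^61 and exceeds INT_MAX, so __const_min() yields INT_MAX, and
 * the & -BITS_PER_LONG rounds it down to a multiple of 64:
 * sysctl_nr_open_max == 0x7fffffc0.
 */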

static void *alloc_fdmem(size_t size)
{
	/*
	 * Very large allocations can stress page reclaim, so fall back to
	 * vmalloc() if the allocation size will be considered "large" by the VM.
	 */
	if (size <= (PAGE_SIZE << PAGE_ALLOC_COSTLY_ORDER)) {
		void *data = kmalloc(size, GFP_KERNEL|__GFP_NOWARN|__GFP_NORETRY);
		if (data != NULL)
			return data;
	}
	return vmalloc(size);
}
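
/*
 * Rule of thumb (illustrative, assuming 4K pages and the default
 * PAGE_ALLOC_COSTLY_ORDER of 3): requests up to 32KB - an fd array of up
 * to 4096 slots on 64-bit - are served by kmalloc(); anything larger
 * falls back to vmalloc() to avoid costly high-order page allocations.
 */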

static void __free_fdtable(struct fdtable *fdt)
{
	kvfree(fdt->fd);
	kvfree(fdt->open_fds);
	kfree(fdt);
}

static void free_fdtable_rcu(struct rcu_head *rcu)
{
	__free_fdtable(container_of(rcu, struct fdtable, rcu));
}

/*
 * Expand the fdset in the files_struct.  Called with the files spinlock
 * held for write.
 */
static void copy_fdtable(struct fdtable *nfdt, struct fdtable *ofdt)
{
	unsigned int cpy, set;

	BUG_ON(nfdt->max_fds < ofdt->max_fds);

	cpy = ofdt->max_fds * sizeof(struct file *);
	set = (nfdt->max_fds - ofdt->max_fds) * sizeof(struct file *);
	memcpy(nfdt->fd, ofdt->fd, cpy);
	memset((char *)(nfdt->fd) + cpy, 0, set);

	cpy = ofdt->max_fds / BITS_PER_BYTE;
	set = (nfdt->max_fds - ofdt->max_fds) / BITS_PER_BYTE;
	memcpy(nfdt->open_fds, ofdt->open_fds, cpy);
	memset((char *)(nfdt->open_fds) + cpy, 0, set);
	memcpy(nfdt->close_on_exec, ofdt->close_on_exec, cpy);
	memset((char *)(nfdt->close_on_exec) + cpy, 0, set);
}

static struct fdtable * alloc_fdtable(unsigned int nr)
{
	struct fdtable *fdt;
	void *data;

	/*
	 * Figure out how many fds we actually want to support in this fdtable.
	 * Allocation steps are keyed to the size of the fdarray, since it
	 * grows far faster than any of the other dynamic data. We try to fit
	 * the fdarray into comfortable page-tuned chunks: starting at 1024B
	 * and growing in powers of two from there on.
	 */
	nr /= (1024 / sizeof(struct file *));
	nr = roundup_pow_of_two(nr + 1);
	nr *= (1024 / sizeof(struct file *));
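	/*
	 * Worked example (illustrative, 64-bit, so 1024 / sizeof(struct
	 * file *) == 128): for nr == 300, 300 / 128 == 2,
	 * roundup_pow_of_two(3) == 4, and 4 * 128 == 512, i.e. the table
	 * is sized for 512 descriptors - a one-page (4096-byte) fd array.
	 */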
	/*
	 * Note that this can drive nr *below* what we had passed if sysctl_nr_open
	 * had been set lower between the check in expand_files() and here.  Deal
	 * with that in caller, it's cheaper that way.
	 *
	 * We make sure that nr remains a multiple of BITS_PER_LONG - otherwise
	 * bitmaps handling below becomes unpleasant, to put it mildly...
	 */
	if (unlikely(nr > sysctl_nr_open))
		nr = ((sysctl_nr_open - 1) | (BITS_PER_LONG - 1)) + 1;

	fdt = kmalloc(sizeof(struct fdtable), GFP_KERNEL);
	if (!fdt)
		goto out;
	fdt->max_fds = nr;
	data = alloc_fdmem(nr * sizeof(struct file *));
	if (!data)
		goto out_fdt;
	fdt->fd = data;

	data = alloc_fdmem(max_t(size_t,
				 2 * nr / BITS_PER_BYTE, L1_CACHE_BYTES));
	if (!data)
		goto out_arr;
	fdt->open_fds = data;
	data += nr / BITS_PER_BYTE;
	fdt->close_on_exec = data;

	return fdt;

out_arr:
	kvfree(fdt->fd);
out_fdt:
	kfree(fdt);
out:
	return NULL;
}

/*
 * Expand the file descriptor table.
 * This function will allocate a new fdtable and both fd array and fdset, of
 * the given size.
 * Return <0 error code on error; 1 on successful completion.
 * The files->file_lock should be held on entry, and will be held on exit.
 */
static int expand_fdtable(struct files_struct *files, int nr)
	__releases(files->file_lock)
	__acquires(files->file_lock)
{
	struct fdtable *new_fdt, *cur_fdt;

	spin_unlock(&files->file_lock);
	new_fdt = alloc_fdtable(nr);
	spin_lock(&files->file_lock);
	if (!new_fdt)
		return -ENOMEM;
	/*
	 * extremely unlikely race - sysctl_nr_open decreased between the check in
	 * caller and alloc_fdtable().  Cheaper to catch it here...
	 */
	if (unlikely(new_fdt->max_fds <= nr)) {
		__free_fdtable(new_fdt);
		return -EMFILE;
	}
	/*
	 * Check again since another task may have expanded the fd table while
	 * we dropped the lock
	 */
	cur_fdt = files_fdtable(files);
	if (nr >= cur_fdt->max_fds) {
		/* Continue as planned */
		copy_fdtable(new_fdt, cur_fdt);
		rcu_assign_pointer(files->fdt, new_fdt);
		if (cur_fdt != &files->fdtab)
			call_rcu(&cur_fdt->rcu, free_fdtable_rcu);
	} else {
		/* Somebody else expanded, so undo our attempt */
		__free_fdtable(new_fdt);
	}
	return 1;
}

/*
 * Expand files.
 * This function will expand the file structures, if the requested size exceeds
 * the current capacity and there is room for expansion.
 * Return <0 error code on error; 0 when nothing done; 1 when files were
 * expanded and execution may have blocked.
 * The files->file_lock should be held on entry, and will be held on exit.
 */
static int expand_files(struct files_struct *files, int nr)
{
	struct fdtable *fdt;

	fdt = files_fdtable(files);

	/* Do we need to expand? */
	if (nr < fdt->max_fds)
		return 0;

	/* Can we expand? */
	if (nr >= sysctl_nr_open)
		return -EMFILE;

	/* All good, so we try */
	return expand_fdtable(files, nr);
}

static inline void __set_close_on_exec(int fd, struct fdtable *fdt)
{
	__set_bit(fd, fdt->close_on_exec);
}

static inline void __clear_close_on_exec(int fd, struct fdtable *fdt)
{
	__clear_bit(fd, fdt->close_on_exec);
}

static inline void __set_open_fd(int fd, struct fdtable *fdt)
{
	__set_bit(fd, fdt->open_fds);
}

static inline void __clear_open_fd(int fd, struct fdtable *fdt)
{
	__clear_bit(fd, fdt->open_fds);
}

static int count_open_files(struct fdtable *fdt)
{
	int size = fdt->max_fds;
	int i;

	/* Find the last open fd */
	for (i = size / BITS_PER_LONG; i > 0; ) {
		if (fdt->open_fds[--i])
			break;
	}
	i = (i + 1) * BITS_PER_LONG;
	return i;
}
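
/*
 * Worked example (illustrative, BITS_PER_LONG == 64): with max_fds == 128
 * and the highest open descriptor at fd 70, open_fds[1] is the last
 * non-zero word, so the loop stops at i == 1 and the function returns
 * (1 + 1) * 64 = 128.  The result is thus an upper bound rounded up to a
 * whole bitmap word, not an exact count of open files.
 */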

/*
 * Allocate a new files structure and copy contents from the
 * passed in files structure.
 * errorp will be valid only when the returned files_struct is NULL.
 */
struct files_struct *dup_fd(struct files_struct *oldf, int *errorp)
{
	struct files_struct *newf;
	struct file **old_fds, **new_fds;
	int open_files, size, i;
	struct fdtable *old_fdt, *new_fdt;

	*errorp = -ENOMEM;
	newf = kmem_cache_alloc(files_cachep, GFP_KERNEL);
	if (!newf)
		goto out;

	atomic_set(&newf->count, 1);

	spin_lock_init(&newf->file_lock);
	newf->next_fd = 0;
	new_fdt = &newf->fdtab;
	new_fdt->max_fds = NR_OPEN_DEFAULT;
	new_fdt->close_on_exec = newf->close_on_exec_init;
	new_fdt->open_fds = newf->open_fds_init;
	new_fdt->fd = &newf->fd_array[0];

	spin_lock(&oldf->file_lock);
	old_fdt = files_fdtable(oldf);
	open_files = count_open_files(old_fdt);

	/*
	 * Check whether we need to allocate a larger fd array and fd set.
	 */
	while (unlikely(open_files > new_fdt->max_fds)) {
		spin_unlock(&oldf->file_lock);

		if (new_fdt != &newf->fdtab)
			__free_fdtable(new_fdt);

		new_fdt = alloc_fdtable(open_files - 1);
		if (!new_fdt) {
			*errorp = -ENOMEM;
			goto out_release;
		}

		/* beyond sysctl_nr_open; nothing to do */
		if (unlikely(new_fdt->max_fds < open_files)) {
			__free_fdtable(new_fdt);
			*errorp = -EMFILE;
			goto out_release;
		}

		/*
		 * Reacquire the oldf lock and a pointer to its fd table;
		 * it may have grown a new, bigger fd table in the meantime,
		 * and we need the latest pointer.
		 */
		spin_lock(&oldf->file_lock);
		old_fdt = files_fdtable(oldf);
		open_files = count_open_files(old_fdt);
	}

	old_fds = old_fdt->fd;
	new_fds = new_fdt->fd;

	memcpy(new_fdt->open_fds, old_fdt->open_fds, open_files / 8);
	memcpy(new_fdt->close_on_exec, old_fdt->close_on_exec, open_files / 8);

	for (i = open_files; i != 0; i--) {
		struct file *f = *old_fds++;
		if (f) {
			get_file(f);
		} else {
			/*
			 * The fd may be claimed in the fd bitmap but not yet
			 * instantiated in the files array if a sibling thread
			 * is partway through open().  So make sure that this
			 * fd is available to the new process.
			 */
			__clear_open_fd(open_files - i, new_fdt);
		}
		rcu_assign_pointer(*new_fds++, f);
	}
	spin_unlock(&oldf->file_lock);

	/* compute the remainder to be cleared */
	size = (new_fdt->max_fds - open_files) * sizeof(struct file *);

	/* This is long word aligned thus could use an optimized version */
	memset(new_fds, 0, size);

	if (new_fdt->max_fds > open_files) {
		int left = (new_fdt->max_fds - open_files) / 8;
		int start = open_files / BITS_PER_LONG;

		memset(&new_fdt->open_fds[start], 0, left);
		memset(&new_fdt->close_on_exec[start], 0, left);
	}

	rcu_assign_pointer(newf->fdt, new_fdt);

	return newf;

out_release:
	kmem_cache_free(files_cachep, newf);
out:
	return NULL;
}
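
/*
 * Note (illustrative): fork() is the expected caller - copy_files() in
 * kernel/fork.c duplicates the table when CLONE_FILES is not set:
 *
 *	newf = dup_fd(oldf, &error);
 *	if (!newf)
 *		goto out;
 */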

static struct fdtable *close_files(struct files_struct * files)
{
	/*
	 * It is safe to dereference the fd table without RCU or
	 * ->file_lock because this is the last reference to the
	 * files structure.
	 */
	struct fdtable *fdt = rcu_dereference_raw(files->fdt);
	int i, j = 0;

	for (;;) {
		unsigned long set;
		i = j * BITS_PER_LONG;
		if (i >= fdt->max_fds)
			break;
		set = fdt->open_fds[j++];
		while (set) {
			if (set & 1) {
				struct file * file = xchg(&fdt->fd[i], NULL);
				if (file)
					filp_close(file, files);
			}
			i++;
			set >>= 1;
		}
	}

	return fdt;
}

struct files_struct *get_files_struct(struct task_struct *task)
{
	struct files_struct *files;

	task_lock(task);
	files = task->files;
	if (files)
		atomic_inc(&files->count);
	task_unlock(task);

	return files;
}

void put_files_struct(struct files_struct *files)
{
	if (atomic_dec_and_test(&files->count)) {
		struct fdtable *fdt = close_files(files);

		/* free the arrays if they are not embedded */
		if (fdt != &files->fdtab)
			__free_fdtable(fdt);
		kmem_cache_free(files_cachep, files);
	}
}

void reset_files_struct(struct files_struct *files)
{
	struct task_struct *tsk = current;
	struct files_struct *old;

	old = tsk->files;
	task_lock(tsk);
	tsk->files = files;
	task_unlock(tsk);
	put_files_struct(old);
}

void exit_files(struct task_struct *tsk)
{
	struct files_struct * files = tsk->files;

	if (files) {
		task_lock(tsk);
		tsk->files = NULL;
		task_unlock(tsk);
		put_files_struct(files);
	}
}

struct files_struct init_files = {
	.count		= ATOMIC_INIT(1),
	.fdt		= &init_files.fdtab,
	.fdtab		= {
		.max_fds	= NR_OPEN_DEFAULT,
		.fd		= &init_files.fd_array[0],
		.close_on_exec	= init_files.close_on_exec_init,
		.open_fds	= init_files.open_fds_init,
	},
	.file_lock	= __SPIN_LOCK_UNLOCKED(init_files.file_lock),
};

/*
 * allocate a file descriptor, mark it busy.
 */
int __alloc_fd(struct files_struct *files,
	       unsigned start, unsigned end, unsigned flags)
{
	unsigned int fd;
	int error;
	struct fdtable *fdt;

	spin_lock(&files->file_lock);
repeat:
	fdt = files_fdtable(files);
	fd = start;
	if (fd < files->next_fd)
		fd = files->next_fd;

	if (fd < fdt->max_fds)
		fd = find_next_zero_bit(fdt->open_fds, fdt->max_fds, fd);

	/*
	 * N.B. For clone tasks sharing a files structure, this test
	 * will limit the total number of files that can be opened.
	 */
	error = -EMFILE;
	if (fd >= end)
		goto out;

	error = expand_files(files, fd);
	if (error < 0)
		goto out;

	/*
	 * If we needed to expand the fd array we
	 * might have blocked - try again.
	 */
	if (error)
		goto repeat;

	if (start <= files->next_fd)
		files->next_fd = fd + 1;

	__set_open_fd(fd, fdt);
	if (flags & O_CLOEXEC)
		__set_close_on_exec(fd, fdt);
	else
		__clear_close_on_exec(fd, fdt);
	error = fd;

	/* Sanity check */
	if (rcu_access_pointer(fdt->fd[fd]) != NULL) {
		printk(KERN_WARNING "alloc_fd: slot %d not NULL!\n", fd);
		rcu_assign_pointer(fdt->fd[fd], NULL);
	}

out:
	spin_unlock(&files->file_lock);
	return error;
}

static int alloc_fd(unsigned start, unsigned flags)
{
	return __alloc_fd(current->files, start, rlimit(RLIMIT_NOFILE), flags);
}

int get_unused_fd_flags(unsigned flags)
{
	return __alloc_fd(current->files, 0, rlimit(RLIMIT_NOFILE), flags);
}
EXPORT_SYMBOL(get_unused_fd_flags);

static void __put_unused_fd(struct files_struct *files, unsigned int fd)
{
	struct fdtable *fdt = files_fdtable(files);
	__clear_open_fd(fd, fdt);
	if (fd < files->next_fd)
		files->next_fd = fd;
}

void put_unused_fd(unsigned int fd)
{
	struct files_struct *files = current->files;
	spin_lock(&files->file_lock);
	__put_unused_fd(files, fd);
	spin_unlock(&files->file_lock);
}

EXPORT_SYMBOL(put_unused_fd);

/*
 * Install a file pointer in the fd array.
 *
 * The VFS is full of places where we drop the files lock between
 * setting the open_fds bitmap and installing the file in the file
 * array.  At any such point, we are vulnerable to a dup2() race
 * installing a file in the array before us.  We need to detect this and
 * fput() the struct file we are about to overwrite in this case.
 *
 * It should never happen - if we allow dup2() to do it, _really_ bad things
 * will follow.
 *
 * NOTE: __fd_install() variant is really, really low-level; don't
 * use it unless you are forced to by truly lousy API shoved down
 * your throat.  'files' *MUST* be either current->files or obtained
 * by get_files_struct(current) done by whoever had given it to you,
 * or really bad things will happen.  Normally you want to use
 * fd_install() instead.
 */

void __fd_install(struct files_struct *files, unsigned int fd,
		struct file *file)
{
	struct fdtable *fdt;
	spin_lock(&files->file_lock);
	fdt = files_fdtable(files);
	BUG_ON(fdt->fd[fd] != NULL);
	rcu_assign_pointer(fdt->fd[fd], file);
	spin_unlock(&files->file_lock);
}

void fd_install(unsigned int fd, struct file *file)
{
	__fd_install(current->files, fd, file);
}

EXPORT_SYMBOL(fd_install);
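
/*
 * Typical caller pattern (illustrative sketch, not part of this file):
 * reserve a descriptor first, create the struct file, then publish it,
 * so a half-constructed file is never visible through the fd table.
 * my_create_file() is a hypothetical constructor.
 *
 *	fd = get_unused_fd_flags(O_CLOEXEC);
 *	if (fd < 0)
 *		return fd;
 *	file = my_create_file();
 *	if (IS_ERR(file)) {
 *		put_unused_fd(fd);	// give the reserved slot back
 *		return PTR_ERR(file);
 *	}
 *	fd_install(fd, file);		// fd is live from here on
 *	return fd;
 */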

/*
 * The same warnings as for __alloc_fd()/__fd_install() apply here...
 */
int __close_fd(struct files_struct *files, unsigned fd)
{
	struct file *file;
	struct fdtable *fdt;

	spin_lock(&files->file_lock);
	fdt = files_fdtable(files);
	if (fd >= fdt->max_fds)
		goto out_unlock;
	file = fdt->fd[fd];
	if (!file)
		goto out_unlock;
	rcu_assign_pointer(fdt->fd[fd], NULL);
	__clear_close_on_exec(fd, fdt);
	__put_unused_fd(files, fd);
	spin_unlock(&files->file_lock);
	return filp_close(file, files);

out_unlock:
	spin_unlock(&files->file_lock);
	return -EBADF;
}

void do_close_on_exec(struct files_struct *files)
{
	unsigned i;
	struct fdtable *fdt;

	/* exec unshares first */
	spin_lock(&files->file_lock);
	for (i = 0; ; i++) {
		unsigned long set;
		unsigned fd = i * BITS_PER_LONG;
		fdt = files_fdtable(files);
		if (fd >= fdt->max_fds)
			break;
		set = fdt->close_on_exec[i];
		if (!set)
			continue;
		fdt->close_on_exec[i] = 0;
		for ( ; set ; fd++, set >>= 1) {
			struct file *file;
			if (!(set & 1))
				continue;
			file = fdt->fd[fd];
			if (!file)
				continue;
			rcu_assign_pointer(fdt->fd[fd], NULL);
			__put_unused_fd(files, fd);
			spin_unlock(&files->file_lock);
			filp_close(file, files);
			cond_resched();
			spin_lock(&files->file_lock);
		}
	}
	spin_unlock(&files->file_lock);
}

static struct file *__fget(unsigned int fd, fmode_t mask)
{
	struct files_struct *files = current->files;
	struct file *file;

	rcu_read_lock();
	file = fcheck_files(files, fd);
	if (file) {
		/* File object ref couldn't be taken */
		if ((file->f_mode & mask) ||
		    !atomic_long_inc_not_zero(&file->f_count))
			file = NULL;
	}
	rcu_read_unlock();

	return file;
}

struct file *fget(unsigned int fd)
{
	return __fget(fd, FMODE_PATH);
}
EXPORT_SYMBOL(fget);

struct file *fget_raw(unsigned int fd)
{
	return __fget(fd, 0);
}
EXPORT_SYMBOL(fget_raw);
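
/*
 * Note (illustrative): the mask argument is a *rejection* mask.  fget()
 * passes FMODE_PATH, so it refuses descriptors opened with O_PATH,
 * while fget_raw() (mask 0) accepts them:
 *
 *	fd = open("/etc/passwd", O_PATH);	// userspace
 *	// in-kernel: fget(fd) -> NULL, fget_raw(fd) -> the struct file
 */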

/*
 * Lightweight file lookup - no refcnt increment if fd table isn't shared.
 *
 * You can use this instead of fget if you satisfy all of the following
 * conditions:
 * 1) You must call fput_light before exiting the syscall and returning control
 *    to userspace (i.e. you cannot remember the returned struct file * after
 *    returning to userspace).
 * 2) You must not call filp_close on the returned struct file * in between
 *    calls to fget_light and fput_light.
 * 3) You must not clone the current task in between the calls to fget_light
 *    and fput_light.
 *
 * The fput_needed flag returned by fget_light should be passed to the
 * corresponding fput_light.
 */
static unsigned long __fget_light(unsigned int fd, fmode_t mask)
{
	struct files_struct *files = current->files;
	struct file *file;

	if (atomic_read(&files->count) == 1) {
		file = __fcheck_files(files, fd);
		if (!file || unlikely(file->f_mode & mask))
			return 0;
		return (unsigned long)file;
	} else {
		file = __fget(fd, mask);
		if (!file)
			return 0;
		return FDPUT_FPUT | (unsigned long)file;
	}
}

unsigned long __fdget(unsigned int fd)
{
	return __fget_light(fd, FMODE_PATH);
}
EXPORT_SYMBOL(__fdget);

unsigned long __fdget_raw(unsigned int fd)
{
	return __fget_light(fd, 0);
}
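
/*
 * Typical syscall-body pattern built on the above (illustrative sketch;
 * fdget()/fdput() from <linux/file.h> wrap __fdget(), and fdput() drops
 * the reference only if FDPUT_FPUT was set, i.e. only if the table was
 * actually shared; do_something() is hypothetical):
 *
 *	struct fd f = fdget(fd);
 *	if (!f.file)
 *		return -EBADF;
 *	ret = do_something(f.file);
 *	fdput(f);
 *	return ret;
 */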

unsigned long __fdget_pos(unsigned int fd)
{
	unsigned long v = __fdget(fd);
	struct file *file = (struct file *)(v & ~3);

	if (file && (file->f_mode & FMODE_ATOMIC_POS)) {
		if (file_count(file) > 1) {
			v |= FDPUT_POS_UNLOCK;
			mutex_lock(&file->f_pos_lock);
		}
	}
	return v;
}

/*
 * We only lock f_pos if we have threads or if the file might be
 * shared with another process. In both cases we'll have an elevated
 * file count (done either by fdget() or by fork()).
 */
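
/*
 * Illustrative sketch of how read()/write() use this: fdget_pos() takes
 * f_pos_lock only when needed, and fdput_pos() releases it iff
 * FDPUT_POS_UNLOCK ended up set in the returned word:
 *
 *	struct fd f = fdget_pos(fd);
 *	loff_t pos = f.file->f_pos;
 *	ret = vfs_write(f.file, buf, count, &pos);
 *	if (ret >= 0)
 *		f.file->f_pos = pos;
 *	fdput_pos(f);
 */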

void set_close_on_exec(unsigned int fd, int flag)
{
	struct files_struct *files = current->files;
	struct fdtable *fdt;
	spin_lock(&files->file_lock);
	fdt = files_fdtable(files);
	if (flag)
		__set_close_on_exec(fd, fdt);
	else
		__clear_close_on_exec(fd, fdt);
	spin_unlock(&files->file_lock);
}

bool get_close_on_exec(unsigned int fd)
{
	struct files_struct *files = current->files;
	struct fdtable *fdt;
	bool res;
	rcu_read_lock();
	fdt = files_fdtable(files);
	res = close_on_exec(fd, fdt);
	rcu_read_unlock();
	return res;
}

static int do_dup2(struct files_struct *files,
	struct file *file, unsigned fd, unsigned flags)
{
	struct file *tofree;
	struct fdtable *fdt;

	/*
	 * We need to detect attempts to do dup2() over allocated but still
	 * not finished descriptor.  NB: OpenBSD avoids that at the price of
	 * extra work in their equivalent of fget() - they insert struct
	 * file immediately after grabbing descriptor, mark it larval if
	 * more work (e.g. actual opening) is needed and make sure that
	 * fget() treats larval files as absent.  Potentially interesting,
	 * but while extra work in fget() is trivial, locking implications
	 * and amount of surgery on open()-related paths in VFS are not.
	 * FreeBSD fails with -EBADF in the same situation, NetBSD "solution"
	 * deadlocks in rather amusing ways, AFAICS.  All of that is out of
	 * scope of POSIX or SUS, since neither considers shared descriptor
	 * tables and this condition does not arise without those.
	 */
	fdt = files_fdtable(files);
	tofree = fdt->fd[fd];
	if (!tofree && fd_is_open(fd, fdt))
		goto Ebusy;
	get_file(file);
	rcu_assign_pointer(fdt->fd[fd], file);
	__set_open_fd(fd, fdt);
	if (flags & O_CLOEXEC)
		__set_close_on_exec(fd, fdt);
	else
		__clear_close_on_exec(fd, fdt);
	spin_unlock(&files->file_lock);

	if (tofree)
		filp_close(tofree, files);

	return fd;

Ebusy:
	spin_unlock(&files->file_lock);
	return -EBUSY;
}

int replace_fd(unsigned fd, struct file *file, unsigned flags)
{
	int err;
	struct files_struct *files = current->files;

	if (!file)
		return __close_fd(files, fd);

	if (fd >= rlimit(RLIMIT_NOFILE))
		return -EBADF;

	spin_lock(&files->file_lock);
	err = expand_files(files, fd);
	if (unlikely(err < 0))
		goto out_unlock;
	return do_dup2(files, file, fd, flags);

out_unlock:
	spin_unlock(&files->file_lock);
	return err;
}
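
/*
 * Illustrative use (sketch): replace_fd() is the in-kernel analogue of
 * dup2() into an existing slot, e.g. pointing a helper's stdin at a
 * kernel-created pipe; "pipe_file" is hypothetical here:
 *
 *	err = replace_fd(0, pipe_file, 0);
 *	if (err < 0)
 *		return err;
 */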

SYSCALL_DEFINE3(dup3, unsigned int, oldfd, unsigned int, newfd, int, flags)
{
	int err = -EBADF;
	struct file *file;
	struct files_struct *files = current->files;

	if ((flags & ~O_CLOEXEC) != 0)
		return -EINVAL;

	if (unlikely(oldfd == newfd))
		return -EINVAL;

	if (newfd >= rlimit(RLIMIT_NOFILE))
		return -EBADF;

	spin_lock(&files->file_lock);
	err = expand_files(files, newfd);
	file = fcheck(oldfd);
	if (unlikely(!file))
		goto Ebadf;
	if (unlikely(err < 0)) {
		if (err == -EMFILE)
			goto Ebadf;
		goto out_unlock;
	}
	return do_dup2(files, file, newfd, flags);

Ebadf:
	err = -EBADF;
out_unlock:
	spin_unlock(&files->file_lock);
	return err;
}

SYSCALL_DEFINE2(dup2, unsigned int, oldfd, unsigned int, newfd)
{
	if (unlikely(newfd == oldfd)) { /* corner case */
		struct files_struct *files = current->files;
		int retval = oldfd;

		rcu_read_lock();
		if (!fcheck_files(files, oldfd))
			retval = -EBADF;
		rcu_read_unlock();
		return retval;
	}
	return sys_dup3(oldfd, newfd, 0);
}

SYSCALL_DEFINE1(dup, unsigned int, fildes)
{
	int ret = -EBADF;
	struct file *file = fget_raw(fildes);

	if (file) {
		ret = get_unused_fd();
		if (ret >= 0)
			fd_install(ret, file);
		else
			fput(file);
	}
	return ret;
}

int f_dupfd(unsigned int from, struct file *file, unsigned flags)
{
	int err;
	if (from >= rlimit(RLIMIT_NOFILE))
		return -EINVAL;
	err = alloc_fd(from, flags);
	if (err >= 0) {
		get_file(file);
		fd_install(err, file);
	}
	return err;
}

int iterate_fd(struct files_struct *files, unsigned n,
		int (*f)(const void *, struct file *, unsigned),
		const void *p)
{
	struct fdtable *fdt;
	int res = 0;
	if (!files)
		return 0;
	spin_lock(&files->file_lock);
	for (fdt = files_fdtable(files); n < fdt->max_fds; n++) {
		struct file *file;
		file = rcu_dereference_check_fdtable(files, fdt->fd[n]);
		if (!file)
			continue;
		res = f(p, file, n);
		if (res)
			break;
	}
	spin_unlock(&files->file_lock);
	return res;
}
EXPORT_SYMBOL(iterate_fd);
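
/*
 * Illustrative iterate_fd() callback (sketch, not part of this file):
 * a nonzero return value stops the walk and is passed back, so a
 * callback can report the first matching descriptor as fd + 1:
 *
 *	static int match_file(const void *p, struct file *file, unsigned fd)
 *	{
 *		return file == p ? fd + 1 : 0;
 *	}
 *
 *	n = iterate_fd(files, 0, match_file, filp);	// 0 if not found
 */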