// SPDX-License-Identifier: GPL-2.0-only
/*
 *  linux/fs/file_table.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *  Copyright (C) 1997 David S. Miller (davem@caip.rutgers.edu)
 */
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/file.h>
#include <linux/fdtable.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/security.h>
#include <linux/cred.h>
#include <linux/eventpoll.h>
#include <linux/rcupdate.h>
#include <linux/mount.h>
#include <linux/capability.h>
#include <linux/cdev.h>
#include <linux/fsnotify.h>
#include <linux/sysctl.h>
#include <linux/percpu_counter.h>
#include <linux/percpu.h>
#include <linux/task_work.h>
#include <linux/ima.h>
#include <linux/swap.h>

#include <linux/atomic.h>

#include "internal.h"
/* sysctl tunables... */
struct files_stat_struct files_stat = {
	.max_files = NR_FILE
};

/* SLAB cache for file structures */
static struct kmem_cache *filp_cachep __read_mostly;
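
/*
 * Number of allocated struct file objects that are subject to the
 * files_stat.max_files limit; allocations done with FMODE_NOACCOUNT
 * are excluded from this count.
 */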
static struct percpu_counter nr_files __cacheline_aligned_in_smp;

static void file_free_rcu(struct rcu_head *head)
{
	struct file *f = container_of(head, struct file, f_u.fu_rcuhead);

	put_cred(f->f_cred);
	kmem_cache_free(filp_cachep, f);
}
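
/*
 * file_free() undoes what __alloc_file() set up: it releases the security
 * blob, drops the nr_files accounting (unless the file was allocated with
 * FMODE_NOACCOUNT) and frees the struct file after an RCU grace period
 * via file_free_rcu().
 */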
static inline void file_free(struct file *f)
{
	security_file_free(f);
	if (!(f->f_mode & FMODE_NOACCOUNT))
		percpu_counter_dec(&nr_files);
	call_rcu(&f->f_u.fu_rcuhead, file_free_rcu);
}

/*
 * Return the total number of open files in the system
 */
static long get_nr_files(void)
{
	return percpu_counter_read_positive(&nr_files);
}

/*
 * Return the maximum number of open files in the system
 */
unsigned long get_max_files(void)
{
	return files_stat.max_files;
}
EXPORT_SYMBOL_GPL(get_max_files);

/*
 * Handle nr_files sysctl
 */
#if defined(CONFIG_SYSCTL) && defined(CONFIG_PROC_FS)
int proc_nr_files(struct ctl_table *table, int write,
		  void __user *buffer, size_t *lenp, loff_t *ppos)
{
	files_stat.nr_files = get_nr_files();
	return proc_doulongvec_minmax(table, write, buffer, lenp, ppos);
}
#else
int proc_nr_files(struct ctl_table *table, int write,
		  void __user *buffer, size_t *lenp, loff_t *ppos)
{
	return -ENOSYS;
}
#endif
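
/*
 * For reference, this handler is wired up from kernel/sysctl.c roughly as
 * follows (a paraphrased sketch, not code from this file; field values are
 * from memory and may differ slightly between kernel versions):
 *
 *	{
 *		.procname	= "file-nr",
 *		.data		= &files_stat,
 *		.maxlen		= sizeof(files_stat),
 *		.mode		= 0444,
 *		.proc_handler	= proc_nr_files,
 *	},
 */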

static struct file *__alloc_file(int flags, const struct cred *cred)
{
	struct file *f;
	int error;

	f = kmem_cache_zalloc(filp_cachep, GFP_KERNEL);
	if (unlikely(!f))
		return ERR_PTR(-ENOMEM);

	f->f_cred = get_cred(cred);
	error = security_file_alloc(f);
	if (unlikely(error)) {
		file_free_rcu(&f->f_u.fu_rcuhead);
		return ERR_PTR(error);
	}

	atomic_long_set(&f->f_count, 1);
	rwlock_init(&f->f_owner.lock);
	spin_lock_init(&f->f_lock);
	mutex_init(&f->f_pos_lock);
	eventpoll_init_file(f);
	f->f_flags = flags;
	f->f_mode = OPEN_FMODE(flags);
	/* f->f_version: 0 */

	return f;
}

/*
 * Find an unused file structure and return a pointer to it.
 * Returns an error pointer if some error happened, e.g. we are over the
 * file structures limit, run out of memory or the operation is not
 * permitted.
 *
 * Be very careful using this.  You are responsible for
 * getting write access to any mount that you might assign
 * to this filp, if it is opened for write.  If this is not
 * done, you will imbalance the mount's writer count
 * and get a warning at __fput() time.
 */
struct file *alloc_empty_file(int flags, const struct cred *cred)
{
	static long old_max;
	struct file *f;

	/*
	 * Privileged users can go above max_files
	 */
	if (get_nr_files() >= files_stat.max_files && !capable(CAP_SYS_ADMIN)) {
		/*
		 * percpu_counters are inaccurate.  Do an expensive check before
		 * we go and fail.
		 */
		if (percpu_counter_sum_positive(&nr_files) >= files_stat.max_files)
			goto over;
	}

	f = __alloc_file(flags, cred);
	if (!IS_ERR(f))
		percpu_counter_inc(&nr_files);

	return f;

over:
	/* Ran out of filps - report that */
	if (get_nr_files() > old_max) {
		pr_info("VFS: file-max limit %lu reached\n", get_max_files());
		old_max = get_nr_files();
	}
	return ERR_PTR(-ENFILE);
}

/*
 * Variant of alloc_empty_file() that doesn't check and modify nr_files.
 *
 * Should not be used unless there's a very good reason to do so.
 */
struct file *alloc_empty_file_noaccount(int flags, const struct cred *cred)
{
	struct file *f = __alloc_file(flags, cred);

	if (!IS_ERR(f))
		f->f_mode |= FMODE_NOACCOUNT;

	return f;
}

/**
 * alloc_file - allocate and initialize a 'struct file'
 * @path: the (dentry, vfsmount) pair for the new file
 * @flags: O_... flags with which the new file will be opened
 * @fop: the 'struct file_operations' for the new file
 */
static struct file *alloc_file(const struct path *path, int flags,
		const struct file_operations *fop)
{
	struct file *file;

	file = alloc_empty_file(flags, current_cred());
	if (IS_ERR(file))
		return file;

	file->f_path = *path;
	file->f_inode = path->dentry->d_inode;
	file->f_mapping = path->dentry->d_inode->i_mapping;
	file->f_wb_err = filemap_sample_wb_err(file->f_mapping);
	if ((file->f_mode & FMODE_READ) &&
	     likely(fop->read || fop->read_iter))
		file->f_mode |= FMODE_CAN_READ;
	if ((file->f_mode & FMODE_WRITE) &&
	     likely(fop->write || fop->write_iter))
		file->f_mode |= FMODE_CAN_WRITE;
	file->f_mode |= FMODE_OPENED;
	file->f_op = fop;
	if ((file->f_mode & (FMODE_READ | FMODE_WRITE)) == FMODE_READ)
		i_readcount_inc(path->dentry->d_inode);
	return file;
}

struct file *alloc_file_pseudo(struct inode *inode, struct vfsmount *mnt,
				const char *name, int flags,
				const struct file_operations *fops)
{
	static const struct dentry_operations anon_ops = {
		.d_dname = simple_dname
	};
	struct qstr this = QSTR_INIT(name, strlen(name));
	struct path path;
	struct file *file;

	path.dentry = d_alloc_pseudo(mnt->mnt_sb, &this);
	if (!path.dentry)
		return ERR_PTR(-ENOMEM);
	if (!mnt->mnt_sb->s_d_op)
		d_set_d_op(path.dentry, &anon_ops);
	path.mnt = mntget(mnt);
	d_instantiate(path.dentry, inode);
	file = alloc_file(&path, flags, fops);
	if (IS_ERR(file)) {
		ihold(inode);
		path_put(&path);
	}
	return file;
}
EXPORT_SYMBOL(alloc_file_pseudo);
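
/*
 * Illustrative sketch (not part of this file): a typical caller of
 * alloc_file_pseudo() on its own internal mount, along the lines of what
 * anon_inode_getfile() does.  example_mnt, example_fops and
 * example_pseudo_open() are hypothetical names used only for illustration.
 */
#if 0
static struct file *example_pseudo_open(struct vfsmount *example_mnt,
					struct inode *inode, int flags,
					const struct file_operations *example_fops)
{
	struct file *file;

	/* On success the caller's inode reference is owned by the new dentry. */
	file = alloc_file_pseudo(inode, example_mnt, "[example]",
				 flags, example_fops);
	if (IS_ERR(file))
		iput(inode);	/* failure leaves our inode reference to drop */
	return file;
}
#endif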

struct file *alloc_file_clone(struct file *base, int flags,
				const struct file_operations *fops)
{
	struct file *f = alloc_file(&base->f_path, flags, fops);

	if (!IS_ERR(f)) {
		path_get(&f->f_path);
		f->f_mapping = base->f_mapping;
	}
	return f;
}

/* the real guts of fput() - releasing the last reference to file */
static void __fput(struct file *file)
{
	struct dentry *dentry = file->f_path.dentry;
	struct vfsmount *mnt = file->f_path.mnt;
	struct inode *inode = file->f_inode;
	fmode_t mode = file->f_mode;

	if (unlikely(!(file->f_mode & FMODE_OPENED)))
		goto out;

	might_sleep();

	fsnotify_close(file);
	/*
	 * The function eventpoll_release() should be the first called
	 * in the file cleanup chain.
	 */
	eventpoll_release(file);
	locks_remove_file(file);

	ima_file_free(file);
	if (unlikely(file->f_flags & FASYNC)) {
		if (file->f_op->fasync)
			file->f_op->fasync(-1, file, 0);
	}
	if (file->f_op->release)
		file->f_op->release(inode, file);
	if (unlikely(S_ISCHR(inode->i_mode) && inode->i_cdev != NULL &&
		     !(mode & FMODE_PATH))) {
		cdev_put(inode->i_cdev);
	}
	fops_put(file->f_op);
	put_pid(file->f_owner.pid);
	if ((mode & (FMODE_READ | FMODE_WRITE)) == FMODE_READ)
		i_readcount_dec(inode);
	if (mode & FMODE_WRITER) {
		put_write_access(inode);
		__mnt_drop_write(mnt);
	}
	dput(dentry);
	if (unlikely(mode & FMODE_NEED_UNMOUNT))
		dissolve_on_fput(mnt);
	mntput(mnt);
out:
	file_free(file);
}

static LLIST_HEAD(delayed_fput_list);
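/*
 * Final fput()s that could not run in the dropper's own context (kernel
 * threads, interrupt context, or tasks past exit_task_work()) are queued
 * on delayed_fput_list and drained here from workqueue context.
 */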
static void delayed_fput(struct work_struct *unused)
{
	struct llist_node *node = llist_del_all(&delayed_fput_list);
	struct file *f, *t;

	llist_for_each_entry_safe(f, t, node, f_u.fu_llist)
		__fput(f);
}

static void ____fput(struct callback_head *work)
{
	__fput(container_of(work, struct file, f_u.fu_rcuhead));
}

/*
 * If a kernel thread really needs to have the final fput() it has done
 * complete, call this.  The only user right now is the boot - we
 * *do* need to make sure our writes to binaries on initramfs have
 * not left us with opened struct file waiting for __fput() - execve()
 * won't work without that.  Please, don't add more callers without
 * very good reasons; in particular, never call this with locks
 * held and never call it from a thread that might need to do
 * some work on any kind of umount.
 */
void flush_delayed_fput(void)
{
	delayed_fput(NULL);
}

static DECLARE_DELAYED_WORK(delayed_fput_work, delayed_fput);
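
/*
 * fput_many() drops @refs references at once; fput() below is the common
 * single-reference wrapper.  When the last reference goes away, __fput()
 * is deferred: via task_work for ordinary process context, or via the
 * delayed workqueue for kernel threads, interrupt context and tasks that
 * have already run exit_task_work().
 */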
void fput_many(struct file *file, unsigned int refs)
{
	if (atomic_long_sub_and_test(refs, &file->f_count)) {
		struct task_struct *task = current;

		if (likely(!in_interrupt() && !(task->flags & PF_KTHREAD))) {
			init_task_work(&file->f_u.fu_rcuhead, ____fput);
			if (!task_work_add(task, &file->f_u.fu_rcuhead, true))
				return;
			/*
			 * After this task has run exit_task_work(),
			 * task_work_add() will fail.  Fall through to delayed
			 * fput to avoid leaking *file.
			 */
		}

		if (llist_add(&file->f_u.fu_llist, &delayed_fput_list))
			schedule_delayed_work(&delayed_fput_work, 1);
	}
}

void fput(struct file *file)
{
	fput_many(file, 1);
}
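
/*
 * Illustrative sketch (not part of this file): the usual pattern for code
 * that temporarily needs the struct file behind a file descriptor.  Every
 * successful fget() must be balanced by fput(); dropping the last
 * reference defers the actual teardown to __fput() as described above.
 * example_peek_fd() is a hypothetical name used only for illustration.
 */
#if 0
static int example_peek_fd(unsigned int fd)
{
	struct file *file = fget(fd);

	if (!file)
		return -EBADF;
	/* ... inspect file->f_mode, file->f_path, etc. ... */
	fput(file);
	return 0;
}
#endif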

/*
 * Synchronous analog of fput(); for kernel threads that might be needed
 * during some umount() (and thus can't use flush_delayed_fput() without
 * risking deadlocks), but need to wait for completion of __fput() and
 * know that for this specific struct file it won't involve anything that
 * would need them.  Use only if you really need it - at the very least,
 * don't blindly convert fput() calls from a kernel thread to this.
 */
void __fput_sync(struct file *file)
{
	if (atomic_long_dec_and_test(&file->f_count)) {
		struct task_struct *task = current;
		BUG_ON(!(task->flags & PF_KTHREAD));
		__fput(file);
	}
}

EXPORT_SYMBOL(fput);
EXPORT_SYMBOL(__fput_sync);

void __init files_init(void)
{
	filp_cachep = kmem_cache_create("filp", sizeof(struct file), 0,
			SLAB_HWCACHE_ALIGN | SLAB_PANIC | SLAB_ACCOUNT, NULL);
	percpu_counter_init(&nr_files, 0, GFP_KERNEL);
}

/*
 * One file with associated inode and dcache is very roughly 1K. By default,
 * do not use more than 10% of our memory for files.
 */
void __init files_maxfiles_init(void)
{
	unsigned long n;
	unsigned long nr_pages = totalram_pages();
	unsigned long memreserve = (nr_pages - nr_free_pages()) * 3/2;

	memreserve = min(memreserve, nr_pages - 1);
	n = ((nr_pages - memreserve) * (PAGE_SIZE / 1024)) / 10;

	files_stat.max_files = max_t(unsigned long, n, NR_FILE);
}
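
/*
 * Rough worked example (illustrative numbers, not from this file): with
 * 16 GiB of RAM there are 4194304 4-KiB pages.  If about 5% of memory is
 * already in use at this point, memreserve is roughly 314572 pages, so
 *
 *	n = ((4194304 - 314572) * (4096 / 1024)) / 10 ~= 1551892
 *
 * i.e. fs.file-max defaults to roughly 1.5 million on such a machine.
 */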