// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * fs/inotify_user.c - inotify support for userspace
 *
 * Authors:
 *	John McCutchan	<ttb@tentacle.dhs.org>
 *	Robert Love	<rml@novell.com>
 *
 * Copyright (C) 2005 John McCutchan
 * Copyright 2006 Hewlett-Packard Development Company, L.P.
 *
 * Copyright (C) 2009 Eric Paris <Red Hat Inc>
 * inotify was largely rewritten to make use of the fsnotify infrastructure
 */

#include <linux/file.h>
#include <linux/fs.h> /* struct inode */
#include <linux/fsnotify_backend.h>
#include <linux/idr.h>
#include <linux/init.h> /* fs_initcall */
#include <linux/inotify.h>
#include <linux/kernel.h> /* roundup() */
#include <linux/namei.h> /* LOOKUP_FOLLOW */
#include <linux/sched/signal.h>
#include <linux/slab.h> /* struct kmem_cache */
#include <linux/syscalls.h>
#include <linux/types.h>
#include <linux/anon_inodes.h>
#include <linux/uaccess.h>
#include <linux/poll.h>
#include <linux/wait.h>
#include <linux/memcontrol.h>
#include <linux/security.h>

#include "inotify.h"
#include "../fdinfo.h"

#include <asm/ioctls.h>

/* configurable via /proc/sys/fs/inotify/ */
static int inotify_max_queued_events __read_mostly;

struct kmem_cache *inotify_inode_mark_cachep __read_mostly;

#ifdef CONFIG_SYSCTL

#include <linux/sysctl.h>

struct ctl_table inotify_table[] = {
	{
		.procname	= "max_user_instances",
		.data		= &init_user_ns.ucount_max[UCOUNT_INOTIFY_INSTANCES],
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_minmax,
		.extra1		= SYSCTL_ZERO,
	},
	{
		.procname	= "max_user_watches",
		.data		= &init_user_ns.ucount_max[UCOUNT_INOTIFY_WATCHES],
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_minmax,
		.extra1		= SYSCTL_ZERO,
	},
	{
		.procname	= "max_queued_events",
		.data		= &inotify_max_queued_events,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_minmax,
		.extra1		= SYSCTL_ZERO,
	},
	{ }
};
#endif /* CONFIG_SYSCTL */
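
/*
 * Illustrative sketch (not part of this file): the three knobs above appear
 * under /proc/sys/fs/inotify/.  A userspace program can, for example, read
 * the per-user watch limit before deciding how many watches to create:
 *
 *	FILE *f = fopen("/proc/sys/fs/inotify/max_user_watches", "r");
 *	int max_watches = -1;
 *
 *	if (f) {
 *		if (fscanf(f, "%d", &max_watches) != 1)
 *			max_watches = -1;
 *		fclose(f);
 *	}
 */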

static inline __u32 inotify_arg_to_mask(u32 arg)
{
	__u32 mask;

	/*
	 * Every watch accepts its own IN_IGNORED, cares about events on
	 * children, and receives an event when the inode is unmounted.
	 */
	mask = (FS_IN_IGNORED | FS_EVENT_ON_CHILD | FS_UNMOUNT);

	/* mask in the event bits and behaviour flags the caller asked for */
	mask |= (arg & (IN_ALL_EVENTS | IN_ONESHOT | IN_EXCL_UNLINK));

	return mask;
}

static inline u32 inotify_mask_to_arg(__u32 mask)
{
	return mask & (IN_ALL_EVENTS | IN_ISDIR | IN_UNMOUNT | IN_IGNORED |
		       IN_Q_OVERFLOW);
}
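
/*
 * Illustrative example (not part of this file): the IN_* and FS_* constants
 * share bit values (see the BUILD_BUG_ON checks in inotify_user_setup()
 * below), so both helpers are plain bit masking, e.g.:
 *
 *	__u32 kmask = inotify_arg_to_mask(IN_CREATE | IN_ONESHOT);
 *	    -> FS_CREATE | FS_IN_ONESHOT | FS_IN_IGNORED |
 *	       FS_EVENT_ON_CHILD | FS_UNMOUNT
 *	u32 uarg = inotify_mask_to_arg(FS_CREATE | FS_ISDIR);
 *	    -> IN_CREATE | IN_ISDIR, i.e. only bits userspace may see
 */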

/* inotify userspace file descriptor functions */
static __poll_t inotify_poll(struct file *file, poll_table *wait)
{
	struct fsnotify_group *group = file->private_data;
	__poll_t ret = 0;

	poll_wait(file, &group->notification_waitq, wait);
	spin_lock(&group->notification_lock);
	if (!fsnotify_notify_queue_is_empty(group))
		ret = EPOLLIN | EPOLLRDNORM;
	spin_unlock(&group->notification_lock);

	return ret;
}
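
/*
 * Illustrative sketch (not part of this file): because inotify_poll()
 * reports EPOLLIN whenever the notification queue is non-empty, the
 * descriptor returned by inotify_init1() can be multiplexed with poll(2),
 * select(2) or epoll(7) like any other readable fd.  handle_pending_events()
 * below is a hypothetical stand-in for the caller's read loop:
 *
 *	struct pollfd pfd = { .fd = inotify_fd, .events = POLLIN };
 *
 *	if (poll(&pfd, 1, -1) > 0 && (pfd.revents & POLLIN))
 *		handle_pending_events(inotify_fd);
 */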

static int round_event_name_len(struct fsnotify_event *fsn_event)
{
	struct inotify_event_info *event;

	event = INOTIFY_E(fsn_event);
	if (!event->name_len)
		return 0;
	return roundup(event->name_len + 1, sizeof(struct inotify_event));
}
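
/*
 * Worked example (illustrative): on a typical build
 * sizeof(struct inotify_event) == 16, so a 3-byte name such as "foo"
 * rounds up as roundup(3 + 1, 16) == 16; the name plus its terminating
 * '\0' is padded to 16 bytes in the buffer handed to userspace, while an
 * event with no name contributes 0 extra bytes.
 */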

/*
 * Get an inotify_kernel_event if one exists and is small
 * enough to fit in "count". Return an error pointer if the
 * buffer is not large enough to hold it.
 *
 * Called with the group->notification_lock held.
 */
static struct fsnotify_event *get_one_event(struct fsnotify_group *group,
					    size_t count)
{
	size_t event_size = sizeof(struct inotify_event);
	struct fsnotify_event *event;

	if (fsnotify_notify_queue_is_empty(group))
		return NULL;

	event = fsnotify_peek_first_event(group);

	pr_debug("%s: group=%p event=%p\n", __func__, group, event);

	event_size += round_event_name_len(event);
	if (event_size > count)
		return ERR_PTR(-EINVAL);

	/* held the notification_lock the whole time, so this is the
	 * same event we peeked above */
	fsnotify_remove_first_event(group);

	return event;
}

/*
 * Copy an event to user space, returning how much we copied.
 *
 * We already checked that the event size is smaller than the
 * buffer we had in "get_one_event()" above.
 */
static ssize_t copy_event_to_user(struct fsnotify_group *group,
				  struct fsnotify_event *fsn_event,
				  char __user *buf)
{
	struct inotify_event inotify_event;
	struct inotify_event_info *event;
	size_t event_size = sizeof(struct inotify_event);
	size_t name_len;
	size_t pad_name_len;

	pr_debug("%s: group=%p event=%p\n", __func__, group, fsn_event);

	event = INOTIFY_E(fsn_event);
	name_len = event->name_len;
	/*
	 * round up name length so it is a multiple of event_size
	 * plus an extra byte for the terminating '\0'.
	 */
	pad_name_len = round_event_name_len(fsn_event);
	inotify_event.len = pad_name_len;
	inotify_event.mask = inotify_mask_to_arg(event->mask);
	inotify_event.wd = event->wd;
	inotify_event.cookie = event->sync_cookie;

	/* send the main event */
	if (copy_to_user(buf, &inotify_event, event_size))
		return -EFAULT;

	buf += event_size;

	/*
	 * fsnotify only stores the pathname, so here we have to send the pathname
	 * and then pad that pathname out to a multiple of sizeof(inotify_event)
	 * with zeros.
	 */
	if (pad_name_len) {
		/* copy the path name */
		if (copy_to_user(buf, event->name, name_len))
			return -EFAULT;
		buf += name_len;

		/* fill userspace with 0's */
		if (clear_user(buf, pad_name_len - name_len))
			return -EFAULT;
		event_size += pad_name_len;
	}

	return event_size;
}
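
/*
 * Illustrative sketch (not part of this file): the buffer that userspace
 * read(2)s is therefore a sequence of variable-length records, each a
 * struct inotify_event immediately followed by ev->len bytes of NUL-padded
 * name.  A consumer walks it record by record:
 *
 *	char buf[4096];
 *	ssize_t len = read(inotify_fd, buf, sizeof(buf));
 *
 *	for (char *p = buf; len > 0 && p < buf + len; ) {
 *		const struct inotify_event *ev = (const void *)p;
 *
 *		printf("wd=%d mask=%x name=%s\n", ev->wd, ev->mask,
 *		       ev->len ? ev->name : "");
 *		p += sizeof(*ev) + ev->len;
 *	}
 */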

static ssize_t inotify_read(struct file *file, char __user *buf,
			    size_t count, loff_t *pos)
{
	struct fsnotify_group *group;
	struct fsnotify_event *kevent;
	char __user *start;
	int ret;
	DEFINE_WAIT_FUNC(wait, woken_wake_function);

	start = buf;
	group = file->private_data;

	add_wait_queue(&group->notification_waitq, &wait);
	while (1) {
		spin_lock(&group->notification_lock);
		kevent = get_one_event(group, count);
		spin_unlock(&group->notification_lock);

		pr_debug("%s: group=%p kevent=%p\n", __func__, group, kevent);

		if (kevent) {
			ret = PTR_ERR(kevent);
			if (IS_ERR(kevent))
				break;
			ret = copy_event_to_user(group, kevent, buf);
			fsnotify_destroy_event(group, kevent);
			if (ret < 0)
				break;
			buf += ret;
			count -= ret;
			continue;
		}

		ret = -EAGAIN;
		if (file->f_flags & O_NONBLOCK)
			break;
		ret = -ERESTARTSYS;
		if (signal_pending(current))
			break;

		if (start != buf)
			break;

		wait_woken(&wait, TASK_INTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT);
	}
	remove_wait_queue(&group->notification_waitq, &wait);

	if (start != buf && ret != -EFAULT)
		ret = buf - start;
	return ret;
}
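
/*
 * Illustrative sketch (not part of this file): the loop above is what
 * userspace observes.  A blocking read(2) returns once at least one whole
 * event has been copied; with O_NONBLOCK an empty queue gives EAGAIN, and
 * a buffer too small to hold even the next event gives EINVAL.  The
 * conventional minimum buffer is therefore:
 *
 *	char buf[sizeof(struct inotify_event) + NAME_MAX + 1];
 *	ssize_t n = read(inotify_fd, buf, sizeof(buf));
 *
 *	if (n < 0 && errno == EAGAIN)
 *		return;		(non-blocking descriptor, queue empty)
 */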

static int inotify_release(struct inode *ignored, struct file *file)
{
	struct fsnotify_group *group = file->private_data;

	pr_debug("%s: group=%p\n", __func__, group);

	/* free this group, matching get was inotify_init->fsnotify_obtain_group */
	fsnotify_destroy_group(group);

	return 0;
}

static long inotify_ioctl(struct file *file, unsigned int cmd,
			  unsigned long arg)
{
	struct fsnotify_group *group;
	struct fsnotify_event *fsn_event;
	void __user *p;
	int ret = -ENOTTY;
	size_t send_len = 0;

	group = file->private_data;
	p = (void __user *) arg;

	pr_debug("%s: group=%p cmd=%u\n", __func__, group, cmd);

	switch (cmd) {
	case FIONREAD:
		spin_lock(&group->notification_lock);
		list_for_each_entry(fsn_event, &group->notification_list,
				    list) {
			send_len += sizeof(struct inotify_event);
			send_len += round_event_name_len(fsn_event);
		}
		spin_unlock(&group->notification_lock);
		ret = put_user(send_len, (int __user *) p);
		break;
#ifdef CONFIG_CHECKPOINT_RESTORE
	case INOTIFY_IOC_SETNEXTWD:
		ret = -EINVAL;
		if (arg >= 1 && arg <= INT_MAX) {
			struct inotify_group_private_data *data;

			data = &group->inotify_data;
			spin_lock(&data->idr_lock);
			idr_set_cursor(&data->idr, (unsigned int)arg);
			spin_unlock(&data->idr_lock);
			ret = 0;
		}
		break;
#endif /* CONFIG_CHECKPOINT_RESTORE */
	}

	return ret;
}
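
/*
 * Illustrative sketch (not part of this file): FIONREAD lets a consumer ask
 * how many bytes the next read(2) could return, which is handy for sizing a
 * heap buffer before draining the queue:
 *
 *	int pending = 0;
 *
 *	if (ioctl(inotify_fd, FIONREAD, &pending) == 0 && pending > 0) {
 *		char *buf = malloc(pending);
 *
 *		if (buf)
 *			read(inotify_fd, buf, pending);
 *		free(buf);
 *	}
 */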

static const struct file_operations inotify_fops = {
	.show_fdinfo	= inotify_show_fdinfo,
	.poll		= inotify_poll,
	.read		= inotify_read,
	.fasync		= fsnotify_fasync,
	.release	= inotify_release,
	.unlocked_ioctl	= inotify_ioctl,
	.compat_ioctl	= inotify_ioctl,
	.llseek		= noop_llseek,
};

/*
 * inotify_find_inode - resolve a user-given path to a specific inode
 */
static int inotify_find_inode(const char __user *dirname, struct path *path,
						unsigned int flags, __u64 mask)
{
	int error;

	error = user_path_at(AT_FDCWD, dirname, flags, path);
	if (error)
		return error;
	/* you can only watch an inode if you have read permissions on it */
	error = inode_permission(path->dentry->d_inode, MAY_READ);
	if (error) {
		path_put(path);
		return error;
	}
	error = security_path_notify(path, mask,
				FSNOTIFY_OBJ_TYPE_INODE);
	if (error)
		path_put(path);

	return error;
}

static int inotify_add_to_idr(struct idr *idr, spinlock_t *idr_lock,
			      struct inotify_inode_mark *i_mark)
{
	int ret;

	idr_preload(GFP_KERNEL);
	spin_lock(idr_lock);

	ret = idr_alloc_cyclic(idr, i_mark, 1, 0, GFP_NOWAIT);
	if (ret >= 0) {
		/* we added the mark to the idr, take a reference */
		i_mark->wd = ret;
		fsnotify_get_mark(&i_mark->fsn_mark);
	}

	spin_unlock(idr_lock);
	idr_preload_end();
	return ret < 0 ? ret : 0;
}

static struct inotify_inode_mark *inotify_idr_find_locked(struct fsnotify_group *group,
								int wd)
{
	struct idr *idr = &group->inotify_data.idr;
	spinlock_t *idr_lock = &group->inotify_data.idr_lock;
	struct inotify_inode_mark *i_mark;

	assert_spin_locked(idr_lock);

	i_mark = idr_find(idr, wd);
	if (i_mark) {
		struct fsnotify_mark *fsn_mark = &i_mark->fsn_mark;

		fsnotify_get_mark(fsn_mark);
		/* One ref for being in the idr, one ref we just took */
		BUG_ON(refcount_read(&fsn_mark->refcnt) < 2);
	}

	return i_mark;
}

static struct inotify_inode_mark *inotify_idr_find(struct fsnotify_group *group,
							 int wd)
{
	struct inotify_inode_mark *i_mark;
	spinlock_t *idr_lock = &group->inotify_data.idr_lock;

	spin_lock(idr_lock);
	i_mark = inotify_idr_find_locked(group, wd);
	spin_unlock(idr_lock);

	return i_mark;
}

/*
 * Remove the mark from the idr (if present) and drop the reference
 * on the mark because it was in the idr.
 */
static void inotify_remove_from_idr(struct fsnotify_group *group,
				    struct inotify_inode_mark *i_mark)
{
	struct idr *idr = &group->inotify_data.idr;
	spinlock_t *idr_lock = &group->inotify_data.idr_lock;
	struct inotify_inode_mark *found_i_mark = NULL;
	int wd;

	spin_lock(idr_lock);
	wd = i_mark->wd;

	/*
	 * does this i_mark think it is in the idr? we shouldn't get called
	 * unless it really is in the idr.
	 */
	if (wd == -1) {
		WARN_ONCE(1, "%s: i_mark=%p i_mark->wd=%d i_mark->group=%p\n",
			__func__, i_mark, i_mark->wd, i_mark->fsn_mark.group);
		goto out;
	}

	/* Lets look in the idr to see if we find it */
	found_i_mark = inotify_idr_find_locked(group, wd);
	if (unlikely(!found_i_mark)) {
		WARN_ONCE(1, "%s: i_mark=%p i_mark->wd=%d i_mark->group=%p\n",
			__func__, i_mark, i_mark->wd, i_mark->fsn_mark.group);
		goto out;
	}

	/*
	 * We found a mark in the idr at the right wd, but it's
	 * not the mark we were told to remove.  The wd bookkeeping
	 * is seriously broken somewhere.
	 */
	if (unlikely(found_i_mark != i_mark)) {
		WARN_ONCE(1, "%s: i_mark=%p i_mark->wd=%d i_mark->group=%p "
			"found_i_mark=%p found_i_mark->wd=%d "
			"found_i_mark->group=%p\n", __func__, i_mark,
			i_mark->wd, i_mark->fsn_mark.group, found_i_mark,
			found_i_mark->wd, found_i_mark->fsn_mark.group);
		goto out;
	}

	/*
	 * One ref for being in the idr
	 * one ref grabbed by inotify_idr_find
	 */
	if (unlikely(refcount_read(&i_mark->fsn_mark.refcnt) < 2)) {
		printk(KERN_ERR "%s: i_mark=%p i_mark->wd=%d i_mark->group=%p\n",
			__func__, i_mark, i_mark->wd, i_mark->fsn_mark.group);
		/* we can't really recover with bad ref cnting.. */
		BUG();
	}

	idr_remove(idr, wd);
	/* Removed from the idr, drop that ref. */
	fsnotify_put_mark(&i_mark->fsn_mark);
out:
	i_mark->wd = -1;
	spin_unlock(idr_lock);
	/* match the ref taken by inotify_idr_find_locked() */
	if (found_i_mark)
		fsnotify_put_mark(&found_i_mark->fsn_mark);
}

/*
 * Send IN_IGNORED for this wd, remove this wd from the idr.
 */
void inotify_ignored_and_remove_idr(struct fsnotify_mark *fsn_mark,
				    struct fsnotify_group *group)
{
	struct inotify_inode_mark *i_mark;
	struct fsnotify_iter_info iter_info = { };

	fsnotify_iter_set_report_type_mark(&iter_info, FSNOTIFY_OBJ_TYPE_INODE,
					   fsn_mark);

	/* Queue ignore event for the watch */
	inotify_handle_event(group, NULL, FS_IN_IGNORED, NULL,
			     FSNOTIFY_EVENT_NONE, NULL, 0, &iter_info);

	i_mark = container_of(fsn_mark, struct inotify_inode_mark, fsn_mark);
	/* remove this mark from the idr */
	inotify_remove_from_idr(group, i_mark);

	dec_inotify_watches(group->inotify_data.ucounts);
}

static int inotify_update_existing_watch(struct fsnotify_group *group,
					 struct inode *inode,
					 u32 arg)
{
	struct fsnotify_mark *fsn_mark;
	struct inotify_inode_mark *i_mark;
	__u32 old_mask, new_mask;
	__u32 mask;
	int add = (arg & IN_MASK_ADD);
	int create = (arg & IN_MASK_CREATE);
	int ret;

	mask = inotify_arg_to_mask(arg);

	fsn_mark = fsnotify_find_mark(&inode->i_fsnotify_marks, group);
	if (!fsn_mark)
		return -ENOENT;
	else if (create) {
		ret = -EEXIST;
		goto out;
	}

	i_mark = container_of(fsn_mark, struct inotify_inode_mark, fsn_mark);

	spin_lock(&fsn_mark->lock);
	old_mask = fsn_mark->mask;
	if (add)
		fsn_mark->mask |= mask;
	else
		fsn_mark->mask = mask;
	new_mask = fsn_mark->mask;
	spin_unlock(&fsn_mark->lock);

	if (old_mask != new_mask) {
		/* more bits in old than in new? */
		int dropped = (old_mask & ~new_mask);
		/* more bits in this fsn_mark than the inode's mask? */
		int do_inode = (new_mask & ~inode->i_fsnotify_mask);

		/* update the inode with this new fsn_mark */
		if (dropped || do_inode)
			fsnotify_recalc_mask(inode->i_fsnotify_marks);
	}

	/* return the wd */
	ret = i_mark->wd;

out:
	/* match the get from fsnotify_find_mark() */
	fsnotify_put_mark(fsn_mark);

	return ret;
}
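
/*
 * Illustrative sketch (not part of this file): the add/create flags above
 * give inotify_add_watch(2) its three userspace behaviours when a watch for
 * the inode already exists:
 *
 *	inotify_add_watch(fd, path, IN_MODIFY);
 *	    replaces the existing mask
 *	inotify_add_watch(fd, path, IN_MODIFY | IN_MASK_ADD);
 *	    ORs IN_MODIFY into the existing mask
 *	inotify_add_watch(fd, path, IN_MODIFY | IN_MASK_CREATE);
 *	    fails with EEXIST instead of touching the existing watch
 */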

static int inotify_new_watch(struct fsnotify_group *group,
			     struct inode *inode,
			     u32 arg)
{
	struct inotify_inode_mark *tmp_i_mark;
	__u32 mask;
	int ret;
	struct idr *idr = &group->inotify_data.idr;
	spinlock_t *idr_lock = &group->inotify_data.idr_lock;

	mask = inotify_arg_to_mask(arg);

	tmp_i_mark = kmem_cache_alloc(inotify_inode_mark_cachep, GFP_KERNEL);
	if (unlikely(!tmp_i_mark))
		return -ENOMEM;

	fsnotify_init_mark(&tmp_i_mark->fsn_mark, group);
	tmp_i_mark->fsn_mark.mask = mask;
	tmp_i_mark->wd = -1;

	ret = inotify_add_to_idr(idr, idr_lock, tmp_i_mark);
	if (ret)
		goto out_err;

	/* increment the number of watches the user has */
	if (!inc_inotify_watches(group->inotify_data.ucounts)) {
		inotify_remove_from_idr(group, tmp_i_mark);
		ret = -ENOSPC;
		goto out_err;
	}

	/* we are on the idr, now get on the inode */
	ret = fsnotify_add_inode_mark_locked(&tmp_i_mark->fsn_mark, inode, 0);
	if (ret) {
		/* we failed to get on the inode, get off the idr */
		inotify_remove_from_idr(group, tmp_i_mark);
		goto out_err;
	}

	/* return the watch descriptor for this new mark */
	ret = tmp_i_mark->wd;

out_err:
	/* match the ref from fsnotify_init_mark() */
	fsnotify_put_mark(&tmp_i_mark->fsn_mark);

	return ret;
}

static int inotify_update_watch(struct fsnotify_group *group, struct inode *inode, u32 arg)
{
	int ret = 0;

	mutex_lock(&group->mark_mutex);
	/* try to update an existing watch with the new arg */
	ret = inotify_update_existing_watch(group, inode, arg);
	/* no mark present, try to add a new one */
	if (ret == -ENOENT)
		ret = inotify_new_watch(group, inode, arg);
	mutex_unlock(&group->mark_mutex);

	return ret;
}

static struct fsnotify_group *inotify_new_group(unsigned int max_events)
{
	struct fsnotify_group *group;
	struct inotify_event_info *oevent;

	group = fsnotify_alloc_group(&inotify_fsnotify_ops);
	if (IS_ERR(group))
		return group;

	oevent = kmalloc(sizeof(struct inotify_event_info), GFP_KERNEL);
	if (unlikely(!oevent)) {
		fsnotify_destroy_group(group);
		return ERR_PTR(-ENOMEM);
	}
	group->overflow_event = &oevent->fse;
	fsnotify_init_event(group->overflow_event, 0);
	oevent->mask = FS_Q_OVERFLOW;
	oevent->wd = -1;
	oevent->sync_cookie = 0;
	oevent->name_len = 0;

	group->max_events = max_events;
	group->memcg = get_mem_cgroup_from_mm(current->mm);

	spin_lock_init(&group->inotify_data.idr_lock);
	idr_init(&group->inotify_data.idr);
	group->inotify_data.ucounts = inc_ucount(current_user_ns(),
						 current_euid(),
						 UCOUNT_INOTIFY_INSTANCES);

	if (!group->inotify_data.ucounts) {
		fsnotify_destroy_group(group);
		return ERR_PTR(-EMFILE);
	}

	return group;
}

/* inotify syscalls */
static int do_inotify_init(int flags)
{
	struct fsnotify_group *group;
	int ret;

	/* Check the IN_* constants for consistency. */
	BUILD_BUG_ON(IN_CLOEXEC != O_CLOEXEC);
	BUILD_BUG_ON(IN_NONBLOCK != O_NONBLOCK);

	if (flags & ~(IN_CLOEXEC | IN_NONBLOCK))
		return -EINVAL;

	/* inotify_new_group() took a reference to the group; we drop it when the file is released */
	group = inotify_new_group(inotify_max_queued_events);
	if (IS_ERR(group))
		return PTR_ERR(group);

	ret = anon_inode_getfd("inotify", &inotify_fops, group,
				  O_RDONLY | flags);
	if (ret < 0)
		fsnotify_destroy_group(group);

	return ret;
}

SYSCALL_DEFINE1(inotify_init1, int, flags)
{
	return do_inotify_init(flags);
}

SYSCALL_DEFINE0(inotify_init)
{
	return do_inotify_init(0);
}
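
/*
 * Illustrative sketch (not part of this file): from userspace both entry
 * points look like this, with inotify_init1() simply accepting the flags
 * validated above:
 *
 *	int fd = inotify_init1(IN_NONBLOCK | IN_CLOEXEC);
 *
 *	if (fd < 0)
 *		perror("inotify_init1");
 */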

SYSCALL_DEFINE3(inotify_add_watch, int, fd, const char __user *, pathname,
		u32, mask)
{
	struct fsnotify_group *group;
	struct inode *inode;
	struct path path;
	struct fd f;
	int ret;
	unsigned flags = 0;

	/*
	 * We share a lot of code with fs/dnotify.  We also share
	 * the bit layout between inotify's IN_* and the fsnotify
	 * FS_*.  This check ensures that only the inotify IN_*
	 * bits get passed in and set in watches/events.
	 */
	if (unlikely(mask & ~ALL_INOTIFY_BITS))
		return -EINVAL;
	/*
	 * Require at least one valid bit set in the mask.
	 * Without _something_ set, we would have no events to
	 * watch for.
	 */
	if (unlikely(!(mask & ALL_INOTIFY_BITS)))
		return -EINVAL;

	f = fdget(fd);
	if (unlikely(!f.file))
		return -EBADF;

	/* IN_MASK_ADD and IN_MASK_CREATE don't make sense together */
	if (unlikely((mask & IN_MASK_ADD) && (mask & IN_MASK_CREATE))) {
		ret = -EINVAL;
		goto fput_and_out;
	}

	/* verify that this is indeed an inotify instance */
	if (unlikely(f.file->f_op != &inotify_fops)) {
		ret = -EINVAL;
		goto fput_and_out;
	}

	if (!(mask & IN_DONT_FOLLOW))
		flags |= LOOKUP_FOLLOW;
	if (mask & IN_ONLYDIR)
		flags |= LOOKUP_DIRECTORY;

	ret = inotify_find_inode(pathname, &path, flags,
			(mask & IN_ALL_EVENTS));
	if (ret)
		goto fput_and_out;

	/* inode held in place by reference to path; group by fget on fd */
	inode = path.dentry->d_inode;
	group = f.file->private_data;

	/* create/update an inode mark */
	ret = inotify_update_watch(group, inode, mask);
	path_put(&path);
fput_and_out:
	fdput(f);
	return ret;
}
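
/*
 * Illustrative sketch (not part of this file): a typical caller combines
 * the path-lookup flags handled above with the event bits it wants:
 *
 *	int wd = inotify_add_watch(fd, "/etc",
 *				   IN_CREATE | IN_DELETE | IN_ONLYDIR |
 *				   IN_DONT_FOLLOW);
 *
 *	if (wd < 0)
 *		perror("inotify_add_watch");
 */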

SYSCALL_DEFINE2(inotify_rm_watch, int, fd, __s32, wd)
{
	struct fsnotify_group *group;
	struct inotify_inode_mark *i_mark;
	struct fd f;
	int ret = -EINVAL;

	f = fdget(fd);
	if (unlikely(!f.file))
		return -EBADF;

	/* verify that this is indeed an inotify instance */
	if (unlikely(f.file->f_op != &inotify_fops))
		goto out;

	group = f.file->private_data;

	i_mark = inotify_idr_find(group, wd);
	if (unlikely(!i_mark))
		goto out;

	ret = 0;

	fsnotify_destroy_mark(&i_mark->fsn_mark, group);

	/* match ref taken by inotify_idr_find */
	fsnotify_put_mark(&i_mark->fsn_mark);

out:
	fdput(f);
	return ret;
}
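
/*
 * Illustrative sketch (not part of this file): destroying the mark above
 * queues a final IN_IGNORED event for the wd (see
 * inotify_ignored_and_remove_idr()), so a consumer usually treats that
 * event as the signal to forget its own bookkeeping for the descriptor:
 *
 *	inotify_rm_watch(fd, wd);
 *	    ... a later read(fd, ...) returns an event with ev->wd == wd
 *	    and ev->mask == IN_IGNORED, after which wd is dead ...
 */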

/*
 * inotify_user_setup - Our initialization function.  Note that we cannot return
 * error because we have compiled-in VFS hooks.  So an (unlikely) failure here
 * must result in panic().
 */
static int __init inotify_user_setup(void)
{
	BUILD_BUG_ON(IN_ACCESS != FS_ACCESS);
	BUILD_BUG_ON(IN_MODIFY != FS_MODIFY);
	BUILD_BUG_ON(IN_ATTRIB != FS_ATTRIB);
	BUILD_BUG_ON(IN_CLOSE_WRITE != FS_CLOSE_WRITE);
	BUILD_BUG_ON(IN_CLOSE_NOWRITE != FS_CLOSE_NOWRITE);
	BUILD_BUG_ON(IN_OPEN != FS_OPEN);
	BUILD_BUG_ON(IN_MOVED_FROM != FS_MOVED_FROM);
	BUILD_BUG_ON(IN_MOVED_TO != FS_MOVED_TO);
	BUILD_BUG_ON(IN_CREATE != FS_CREATE);
	BUILD_BUG_ON(IN_DELETE != FS_DELETE);
	BUILD_BUG_ON(IN_DELETE_SELF != FS_DELETE_SELF);
	BUILD_BUG_ON(IN_MOVE_SELF != FS_MOVE_SELF);
	BUILD_BUG_ON(IN_UNMOUNT != FS_UNMOUNT);
	BUILD_BUG_ON(IN_Q_OVERFLOW != FS_Q_OVERFLOW);
	BUILD_BUG_ON(IN_IGNORED != FS_IN_IGNORED);
	BUILD_BUG_ON(IN_EXCL_UNLINK != FS_EXCL_UNLINK);
	BUILD_BUG_ON(IN_ISDIR != FS_ISDIR);
	BUILD_BUG_ON(IN_ONESHOT != FS_IN_ONESHOT);

	BUILD_BUG_ON(HWEIGHT32(ALL_INOTIFY_BITS) != 22);

	inotify_inode_mark_cachep = KMEM_CACHE(inotify_inode_mark,
					       SLAB_PANIC|SLAB_ACCOUNT);

	inotify_max_queued_events = 16384;
	init_user_ns.ucount_max[UCOUNT_INOTIFY_INSTANCES] = 128;
	init_user_ns.ucount_max[UCOUNT_INOTIFY_WATCHES] = 8192;

	return 0;
}
fs_initcall(inotify_user_setup);