/*
 * fs/inotify_user.c - inotify support for userspace
 *
 * Authors:
 *	John McCutchan	<ttb@tentacle.dhs.org>
 *	Robert Love	<rml@novell.com>
 *
 * Copyright (C) 2005 John McCutchan
 * Copyright 2006 Hewlett-Packard Development Company, L.P.
 *
 * Copyright (C) 2009 Eric Paris <Red Hat Inc>
 * inotify was largely rewritten to make use of the fsnotify infrastructure
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2, or (at your option) any
 * later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 */

#include <linux/file.h>
#include <linux/fs.h> /* struct inode */
#include <linux/fsnotify_backend.h>
#include <linux/idr.h>
#include <linux/init.h> /* fs_initcall */
#include <linux/inotify.h>
#include <linux/kernel.h> /* roundup() */
#include <linux/namei.h> /* LOOKUP_FOLLOW */
#include <linux/sched/signal.h>
#include <linux/slab.h> /* struct kmem_cache */
#include <linux/syscalls.h>
#include <linux/types.h>
#include <linux/anon_inodes.h>
#include <linux/uaccess.h>
#include <linux/poll.h>
#include <linux/wait.h>
#include <linux/memcontrol.h>

#include "inotify.h"
#include "../fdinfo.h"

#include <asm/ioctls.h>

/* configurable via /proc/sys/fs/inotify/ */
static int inotify_max_queued_events __read_mostly;

struct kmem_cache *inotify_inode_mark_cachep __read_mostly;

#ifdef CONFIG_SYSCTL

#include <linux/sysctl.h>

static int zero;

struct ctl_table inotify_table[] = {
	{
		.procname	= "max_user_instances",
		.data		= &init_user_ns.ucount_max[UCOUNT_INOTIFY_INSTANCES],
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_minmax,
		.extra1		= &zero,
	},
	{
		.procname	= "max_user_watches",
		.data		= &init_user_ns.ucount_max[UCOUNT_INOTIFY_WATCHES],
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_minmax,
		.extra1		= &zero,
	},
	{
		.procname	= "max_queued_events",
		.data		= &inotify_max_queued_events,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_minmax,
		.extra1		= &zero
	},
	{ }
};
#endif /* CONFIG_SYSCTL */
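
/*
 * Illustrative shell usage (not part of this file): the table above shows
 * up under /proc/sys/fs/inotify/, so the limits can be inspected and tuned
 * at runtime, e.g.:
 *
 *	$ cat /proc/sys/fs/inotify/max_user_watches
 *	8192
 *	# echo 65536 > /proc/sys/fs/inotify/max_user_watches
 */
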
static inline __u32 inotify_arg_to_mask(u32 arg)
{
	__u32 mask;

	/*
	 * Every mark should accept its own ignored event, care about its
	 * children, and receive events when the inode is unmounted.
	 */
	mask = (FS_IN_IGNORED | FS_EVENT_ON_CHILD | FS_UNMOUNT);

	/* mask off the flags used to open the fd */
	mask |= (arg & (IN_ALL_EVENTS | IN_ONESHOT | IN_EXCL_UNLINK));

	return mask;
}
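
/*
 * Worked example (illustrative): inotify_arg_to_mask(IN_MODIFY | IN_ONESHOT)
 * returns FS_MODIFY | FS_IN_ONESHOT plus the always-added bits
 * FS_IN_IGNORED | FS_EVENT_ON_CHILD | FS_UNMOUNT; any bit outside
 * IN_ALL_EVENTS | IN_ONESHOT | IN_EXCL_UNLINK is dropped.
 */
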
static inline u32 inotify_mask_to_arg(__u32 mask)
{
	return mask & (IN_ALL_EVENTS | IN_ISDIR | IN_UNMOUNT | IN_IGNORED |
		       IN_Q_OVERFLOW);
}

/* inotify userspace file descriptor functions */
static __poll_t inotify_poll(struct file *file, poll_table *wait)
{
	struct fsnotify_group *group = file->private_data;
	__poll_t ret = 0;

	poll_wait(file, &group->notification_waitq, wait);
	spin_lock(&group->notification_lock);
	if (!fsnotify_notify_queue_is_empty(group))
		ret = EPOLLIN | EPOLLRDNORM;
	spin_unlock(&group->notification_lock);

	return ret;
}

static int round_event_name_len(struct fsnotify_event *fsn_event)
{
	struct inotify_event_info *event;

	event = INOTIFY_E(fsn_event);
	if (!event->name_len)
		return 0;
	return roundup(event->name_len + 1, sizeof(struct inotify_event));
}
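
/*
 * Worked example (illustrative): sizeof(struct inotify_event) is 16, so a
 * 5-byte name such as "a.txt" rounds as roundup(5 + 1, 16) = 16: the name,
 * its terminating '\0', and ten bytes of padding.
 */
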
/*
 * Get an inotify_kernel_event if one exists and is small
 * enough to fit in "count". Return an error pointer if
 * not large enough.
 *
 * Called with the group->notification_lock held.
 */
static struct fsnotify_event *get_one_event(struct fsnotify_group *group,
					    size_t count)
{
	size_t event_size = sizeof(struct inotify_event);
	struct fsnotify_event *event;

	if (fsnotify_notify_queue_is_empty(group))
		return NULL;

	event = fsnotify_peek_first_event(group);

	pr_debug("%s: group=%p event=%p\n", __func__, group, event);

	event_size += round_event_name_len(event);
	if (event_size > count)
		return ERR_PTR(-EINVAL);

	/* held the notification_lock the whole time, so this is the
	 * same event we peeked above */
	fsnotify_remove_first_event(group);

	return event;
}

/*
 * Copy an event to user space, returning how much we copied.
 *
 * We already checked that the event size is smaller than the
 * buffer we had in "get_one_event()" above.
 */
static ssize_t copy_event_to_user(struct fsnotify_group *group,
				  struct fsnotify_event *fsn_event,
				  char __user *buf)
{
	struct inotify_event inotify_event;
	struct inotify_event_info *event;
	size_t event_size = sizeof(struct inotify_event);
	size_t name_len;
	size_t pad_name_len;

	pr_debug("%s: group=%p event=%p\n", __func__, group, fsn_event);

	event = INOTIFY_E(fsn_event);
	name_len = event->name_len;
	/*
	 * round up name length so it is a multiple of event_size
	 * plus an extra byte for the terminating '\0'.
	 */
	pad_name_len = round_event_name_len(fsn_event);
	inotify_event.len = pad_name_len;
	inotify_event.mask = inotify_mask_to_arg(fsn_event->mask);
	inotify_event.wd = event->wd;
	inotify_event.cookie = event->sync_cookie;

	/* send the main event */
	if (copy_to_user(buf, &inotify_event, event_size))
		return -EFAULT;

	buf += event_size;

	/*
	 * fsnotify only stores the pathname, so here we have to send the pathname
	 * and then pad that pathname out to a multiple of sizeof(inotify_event)
	 * with zeros.
	 */
	if (pad_name_len) {
		/* copy the path name */
		if (copy_to_user(buf, event->name, name_len))
			return -EFAULT;
		buf += name_len;

		/* fill userspace with 0's */
		if (clear_user(buf, pad_name_len - name_len))
			return -EFAULT;
		event_size += pad_name_len;
	}

	return event_size;
}
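
/*
 * Illustrative layout of one record in the user buffer, for wd 1, mask
 * IN_CREATE and name "a.txt" (pad_name_len = 16):
 *
 *	bytes  0..15: struct inotify_event { .wd = 1, .mask = IN_CREATE,
 *	                                     .cookie = 0, .len = 16 }
 *	bytes 16..31: "a.txt\0" followed by ten bytes of zeros
 *
 * so copy_event_to_user() returns 32 for this record.
 */
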
static ssize_t inotify_read(struct file *file, char __user *buf,
			    size_t count, loff_t *pos)
{
	struct fsnotify_group *group;
	struct fsnotify_event *kevent;
	char __user *start;
	int ret;
	DEFINE_WAIT_FUNC(wait, woken_wake_function);

	start = buf;
	group = file->private_data;

	add_wait_queue(&group->notification_waitq, &wait);
	while (1) {
		spin_lock(&group->notification_lock);
		kevent = get_one_event(group, count);
		spin_unlock(&group->notification_lock);

		pr_debug("%s: group=%p kevent=%p\n", __func__, group, kevent);

		if (kevent) {
			ret = PTR_ERR(kevent);
			if (IS_ERR(kevent))
				break;
			ret = copy_event_to_user(group, kevent, buf);
			fsnotify_destroy_event(group, kevent);
			if (ret < 0)
				break;
			buf += ret;
			count -= ret;
			continue;
		}

		ret = -EAGAIN;
		if (file->f_flags & O_NONBLOCK)
			break;
		ret = -ERESTARTSYS;
		if (signal_pending(current))
			break;

		if (start != buf)
			break;

		wait_woken(&wait, TASK_INTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT);
	}
	remove_wait_queue(&group->notification_waitq, &wait);

	if (start != buf && ret != -EFAULT)
		ret = buf - start;
	return ret;
}
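
/*
 * Userspace view (a minimal sketch, not part of this file): read() returns
 * as many whole records as fit in the buffer, so a consumer steps through
 * it by each record's len field; handle() below is a placeholder:
 *
 *	char buf[4096];
 *	ssize_t n = read(fd, buf, sizeof(buf));
 *	char *p = buf;
 *	while (p < buf + n) {
 *		struct inotify_event *ev = (struct inotify_event *)p;
 *		handle(ev->wd, ev->mask, ev->len ? ev->name : NULL);
 *		p += sizeof(*ev) + ev->len;
 *	}
 */
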
static int inotify_release(struct inode *ignored, struct file *file)
{
	struct fsnotify_group *group = file->private_data;

	pr_debug("%s: group=%p\n", __func__, group);

	/* free this group, matching get was inotify_init->fsnotify_alloc_group */
	fsnotify_destroy_group(group);

	return 0;
}

static long inotify_ioctl(struct file *file, unsigned int cmd,
			  unsigned long arg)
{
	struct fsnotify_group *group;
	struct fsnotify_event *fsn_event;
	void __user *p;
	int ret = -ENOTTY;
	size_t send_len = 0;

	group = file->private_data;
	p = (void __user *) arg;

	pr_debug("%s: group=%p cmd=%u\n", __func__, group, cmd);

	switch (cmd) {
	case FIONREAD:
		spin_lock(&group->notification_lock);
		list_for_each_entry(fsn_event, &group->notification_list,
				    list) {
			send_len += sizeof(struct inotify_event);
			send_len += round_event_name_len(fsn_event);
		}
		spin_unlock(&group->notification_lock);
		ret = put_user(send_len, (int __user *) p);
		break;
#ifdef CONFIG_CHECKPOINT_RESTORE
	case INOTIFY_IOC_SETNEXTWD:
		ret = -EINVAL;
		if (arg >= 1 && arg <= INT_MAX) {
			struct inotify_group_private_data *data;

			data = &group->inotify_data;
			spin_lock(&data->idr_lock);
			idr_set_cursor(&data->idr, (unsigned int)arg);
			spin_unlock(&data->idr_lock);
			ret = 0;
		}
		break;
#endif /* CONFIG_CHECKPOINT_RESTORE */
	}

	return ret;
}
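
/*
 * Usage sketch (userspace, illustrative): the FIONREAD case above lets a
 * consumer size its buffer before reading:
 *
 *	int pending;
 *	if (ioctl(fd, FIONREAD, &pending) == 0)
 *		printf("%d bytes of queued events\n", pending);
 */
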
static const struct file_operations inotify_fops = {
	.show_fdinfo	= inotify_show_fdinfo,
	.poll		= inotify_poll,
	.read		= inotify_read,
	.fasync		= fsnotify_fasync,
	.release	= inotify_release,
	.unlocked_ioctl	= inotify_ioctl,
	.compat_ioctl	= inotify_ioctl,
	.llseek		= noop_llseek,
};

/*
 * find_inode - resolve a user-given path to a specific inode
 */
static int inotify_find_inode(const char __user *dirname, struct path *path, unsigned flags)
{
	int error;

	error = user_path_at(AT_FDCWD, dirname, flags, path);
	if (error)
		return error;
	/* you can only watch an inode if you have read permissions on it */
	error = inode_permission(path->dentry->d_inode, MAY_READ);
	if (error)
		path_put(path);
	return error;
}

static int inotify_add_to_idr(struct idr *idr, spinlock_t *idr_lock,
			      struct inotify_inode_mark *i_mark)
{
	int ret;

	idr_preload(GFP_KERNEL);
	spin_lock(idr_lock);

	ret = idr_alloc_cyclic(idr, i_mark, 1, 0, GFP_NOWAIT);
	if (ret >= 0) {
		/* we added the mark to the idr, take a reference */
		i_mark->wd = ret;
		fsnotify_get_mark(&i_mark->fsn_mark);
	}

	spin_unlock(idr_lock);
	idr_preload_end();
	return ret < 0 ? ret : 0;
}

static struct inotify_inode_mark *inotify_idr_find_locked(struct fsnotify_group *group,
								int wd)
{
	struct idr *idr = &group->inotify_data.idr;
	spinlock_t *idr_lock = &group->inotify_data.idr_lock;
	struct inotify_inode_mark *i_mark;

	assert_spin_locked(idr_lock);

	i_mark = idr_find(idr, wd);
	if (i_mark) {
		struct fsnotify_mark *fsn_mark = &i_mark->fsn_mark;

		fsnotify_get_mark(fsn_mark);
		/* One ref for being in the idr, one ref we just took */
		BUG_ON(refcount_read(&fsn_mark->refcnt) < 2);
	}

	return i_mark;
}

static struct inotify_inode_mark *inotify_idr_find(struct fsnotify_group *group,
						   int wd)
{
	struct inotify_inode_mark *i_mark;
	spinlock_t *idr_lock = &group->inotify_data.idr_lock;

	spin_lock(idr_lock);
	i_mark = inotify_idr_find_locked(group, wd);
	spin_unlock(idr_lock);

	return i_mark;
}

/*
 * Remove the mark from the idr (if present) and drop the reference
 * on the mark because it was in the idr.
 */
static void inotify_remove_from_idr(struct fsnotify_group *group,
				    struct inotify_inode_mark *i_mark)
{
	struct idr *idr = &group->inotify_data.idr;
	spinlock_t *idr_lock = &group->inotify_data.idr_lock;
	struct inotify_inode_mark *found_i_mark = NULL;
	int wd;

	spin_lock(idr_lock);
	wd = i_mark->wd;

	/*
	 * does this i_mark think it is in the idr? we shouldn't get called
	 * unless it really is in the idr.
	 */
	if (wd == -1) {
		WARN_ONCE(1, "%s: i_mark=%p i_mark->wd=%d i_mark->group=%p\n",
			__func__, i_mark, i_mark->wd, i_mark->fsn_mark.group);
		goto out;
	}

	/* Let's look in the idr to see if we find it */
	found_i_mark = inotify_idr_find_locked(group, wd);
	if (unlikely(!found_i_mark)) {
		WARN_ONCE(1, "%s: i_mark=%p i_mark->wd=%d i_mark->group=%p\n",
			__func__, i_mark, i_mark->wd, i_mark->fsn_mark.group);
		goto out;
	}

	/*
	 * We found a mark in the idr at the right wd, but it's
	 * not the mark we were told to remove.  eparis seriously
	 * fucked up somewhere.
	 */
	if (unlikely(found_i_mark != i_mark)) {
		WARN_ONCE(1, "%s: i_mark=%p i_mark->wd=%d i_mark->group=%p "
			"found_i_mark=%p found_i_mark->wd=%d "
			"found_i_mark->group=%p\n", __func__, i_mark,
			i_mark->wd, i_mark->fsn_mark.group, found_i_mark,
			found_i_mark->wd, found_i_mark->fsn_mark.group);
		goto out;
	}

	/*
	 * One ref for being in the idr
	 * one ref grabbed by inotify_idr_find
	 */
	if (unlikely(refcount_read(&i_mark->fsn_mark.refcnt) < 2)) {
		printk(KERN_ERR "%s: i_mark=%p i_mark->wd=%d i_mark->group=%p\n",
			__func__, i_mark, i_mark->wd, i_mark->fsn_mark.group);
		/* we can't really recover with bad ref cnting.. */
		BUG();
	}

	idr_remove(idr, wd);
	/* Removed from the idr, drop that ref. */
	fsnotify_put_mark(&i_mark->fsn_mark);
out:
	i_mark->wd = -1;
	spin_unlock(idr_lock);
	/* match the ref taken by inotify_idr_find_locked() */
	if (found_i_mark)
		fsnotify_put_mark(&found_i_mark->fsn_mark);
}

/*
 * Send IN_IGNORED for this wd, remove this wd from the idr.
 */
void inotify_ignored_and_remove_idr(struct fsnotify_mark *fsn_mark,
				    struct fsnotify_group *group)
{
	struct inotify_inode_mark *i_mark;
	struct fsnotify_iter_info iter_info = { };

	fsnotify_iter_set_report_type_mark(&iter_info, FSNOTIFY_OBJ_TYPE_INODE,
					   fsn_mark);

	/* Queue ignore event for the watch */
	inotify_handle_event(group, NULL, FS_IN_IGNORED, NULL,
			     FSNOTIFY_EVENT_NONE, NULL, 0, &iter_info);

	i_mark = container_of(fsn_mark, struct inotify_inode_mark, fsn_mark);
	/* remove this mark from the idr */
	inotify_remove_from_idr(group, i_mark);

	dec_inotify_watches(group->inotify_data.ucounts);
}
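
/*
 * Illustrative note: the reader of this group's fd observes the queued
 * ignore event as a final record for the watch, roughly
 *
 *	{ .wd = <wd>, .mask = IN_IGNORED, .cookie = 0, .len = 0 }
 *
 * after which the wd is dead (idr_alloc_cyclic() delays its reuse).
 */
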
static int inotify_update_existing_watch(struct fsnotify_group *group,
					 struct inode *inode,
					 u32 arg)
{
	struct fsnotify_mark *fsn_mark;
	struct inotify_inode_mark *i_mark;
	__u32 old_mask, new_mask;
	__u32 mask;
	int add = (arg & IN_MASK_ADD);
	int create = (arg & IN_MASK_CREATE);
	int ret;

	mask = inotify_arg_to_mask(arg);

	fsn_mark = fsnotify_find_mark(&inode->i_fsnotify_marks, group);
	if (!fsn_mark)
		return -ENOENT;
	else if (create) {
		ret = -EEXIST;
		goto out;
	}

	i_mark = container_of(fsn_mark, struct inotify_inode_mark, fsn_mark);

	spin_lock(&fsn_mark->lock);
	old_mask = fsn_mark->mask;
	if (add)
		fsn_mark->mask |= mask;
	else
		fsn_mark->mask = mask;
	new_mask = fsn_mark->mask;
	spin_unlock(&fsn_mark->lock);

	if (old_mask != new_mask) {
		/* more bits in old than in new? */
		int dropped = (old_mask & ~new_mask);
		/* more bits in this fsn_mark than the inode's mask? */
		int do_inode = (new_mask & ~inode->i_fsnotify_mask);

		/* update the inode with this new fsn_mark */
		if (dropped || do_inode)
			fsnotify_recalc_mask(inode->i_fsnotify_marks);
	}

	/* return the wd */
	ret = i_mark->wd;

out:
	/* match the get from fsnotify_find_mark() */
	fsnotify_put_mark(fsn_mark);

	return ret;
}

static int inotify_new_watch(struct fsnotify_group *group,
			     struct inode *inode,
			     u32 arg)
{
	struct inotify_inode_mark *tmp_i_mark;
	__u32 mask;
	int ret;
	struct idr *idr = &group->inotify_data.idr;
	spinlock_t *idr_lock = &group->inotify_data.idr_lock;

	mask = inotify_arg_to_mask(arg);

	tmp_i_mark = kmem_cache_alloc(inotify_inode_mark_cachep, GFP_KERNEL);
	if (unlikely(!tmp_i_mark))
		return -ENOMEM;

	fsnotify_init_mark(&tmp_i_mark->fsn_mark, group);
	tmp_i_mark->fsn_mark.mask = mask;
	tmp_i_mark->wd = -1;

	ret = inotify_add_to_idr(idr, idr_lock, tmp_i_mark);
	if (ret)
		goto out_err;

	/* increment the number of watches the user has */
	if (!inc_inotify_watches(group->inotify_data.ucounts)) {
		inotify_remove_from_idr(group, tmp_i_mark);
		ret = -ENOSPC;
		goto out_err;
	}

	/* we are on the idr, now get on the inode */
	ret = fsnotify_add_inode_mark_locked(&tmp_i_mark->fsn_mark, inode, 0);
	if (ret) {
		/* we failed to get on the inode, get off the idr */
		inotify_remove_from_idr(group, tmp_i_mark);
		goto out_err;
	}

	/* return the watch descriptor for this new mark */
	ret = tmp_i_mark->wd;

out_err:
	/* match the ref from fsnotify_init_mark() */
	fsnotify_put_mark(&tmp_i_mark->fsn_mark);

	return ret;
}

static int inotify_update_watch(struct fsnotify_group *group, struct inode *inode, u32 arg)
{
	int ret = 0;

	mutex_lock(&group->mark_mutex);
	/* try to update an existing watch with the new arg */
	ret = inotify_update_existing_watch(group, inode, arg);
	/* no mark present, try to add a new one */
	if (ret == -ENOENT)
		ret = inotify_new_watch(group, inode, arg);
	mutex_unlock(&group->mark_mutex);

	return ret;
}
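
/*
 * Illustrative semantics (userspace view), assuming an earlier watch on the
 * same inode through the same fd:
 *
 *	inotify_add_watch(fd, "/tmp", IN_CREATE);                -> mask is IN_CREATE
 *	inotify_add_watch(fd, "/tmp", IN_DELETE);                -> mask replaced: IN_DELETE
 *	inotify_add_watch(fd, "/tmp", IN_MODIFY | IN_MASK_ADD);  -> mask is IN_DELETE | IN_MODIFY
 *	inotify_add_watch(fd, "/tmp", IN_OPEN | IN_MASK_CREATE); -> fails with EEXIST
 */
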
static struct fsnotify_group *inotify_new_group(unsigned int max_events)
{
	struct fsnotify_group *group;
	struct inotify_event_info *oevent;

	group = fsnotify_alloc_group(&inotify_fsnotify_ops);
	if (IS_ERR(group))
		return group;

	oevent = kmalloc(sizeof(struct inotify_event_info), GFP_KERNEL);
	if (unlikely(!oevent)) {
		fsnotify_destroy_group(group);
		return ERR_PTR(-ENOMEM);
	}
	group->overflow_event = &oevent->fse;
	fsnotify_init_event(group->overflow_event, NULL, FS_Q_OVERFLOW);
	oevent->wd = -1;
	oevent->sync_cookie = 0;
	oevent->name_len = 0;

	group->max_events = max_events;
	group->memcg = get_mem_cgroup_from_mm(current->mm);

	spin_lock_init(&group->inotify_data.idr_lock);
	idr_init(&group->inotify_data.idr);
	group->inotify_data.ucounts = inc_ucount(current_user_ns(),
						 current_euid(),
						 UCOUNT_INOTIFY_INSTANCES);

	if (!group->inotify_data.ucounts) {
		fsnotify_destroy_group(group);
		return ERR_PTR(-EMFILE);
	}

	return group;
}

/* inotify syscalls */
static int do_inotify_init(int flags)
{
	struct fsnotify_group *group;
	int ret;

	/* Check the IN_* constants for consistency.  */
	BUILD_BUG_ON(IN_CLOEXEC != O_CLOEXEC);
	BUILD_BUG_ON(IN_NONBLOCK != O_NONBLOCK);

	if (flags & ~(IN_CLOEXEC | IN_NONBLOCK))
		return -EINVAL;

	/* fsnotify_alloc_group took a reference to group, we put this when we kill the file in the end */
	group = inotify_new_group(inotify_max_queued_events);
	if (IS_ERR(group))
		return PTR_ERR(group);

	ret = anon_inode_getfd("inotify", &inotify_fops, group,
				  O_RDONLY | flags);
	if (ret < 0)
		fsnotify_destroy_group(group);

	return ret;
}

SYSCALL_DEFINE1(inotify_init1, int, flags)
{
	return do_inotify_init(flags);
}

SYSCALL_DEFINE0(inotify_init)
{
	return do_inotify_init(0);
}
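
/*
 * Userspace usage sketch (illustrative, not part of this file): the
 * syscalls in this file combine into the familiar sequence
 *
 *	int fd = inotify_init1(IN_CLOEXEC);
 *	int wd = inotify_add_watch(fd, "/etc", IN_MODIFY | IN_CREATE);
 *	... read(fd, ...) as sketched after inotify_read() above ...
 *	inotify_rm_watch(fd, wd);   -> queues a final IN_IGNORED for wd
 *	close(fd);                  -> inotify_release() tears down the group
 */
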
SYSCALL_DEFINE3(inotify_add_watch, int, fd, const char __user *, pathname,
		u32, mask)
{
	struct fsnotify_group *group;
	struct inode *inode;
	struct path path;
	struct fd f;
	int ret;
	unsigned flags = 0;

	/*
	 * We share a lot of code with fs/dnotify.  We also share
	 * the bit layout between inotify's IN_* and the fsnotify
	 * FS_*.  This check ensures that only the inotify IN_*
	 * bits get passed in and set in watches/events.
	 */
	if (unlikely(mask & ~ALL_INOTIFY_BITS))
		return -EINVAL;
	/*
	 * Require at least one valid bit set in the mask.
	 * Without _something_ set, we would have no events to
	 * watch for.
	 */
	if (unlikely(!(mask & ALL_INOTIFY_BITS)))
		return -EINVAL;

	f = fdget(fd);
	if (unlikely(!f.file))
		return -EBADF;

	/* IN_MASK_ADD and IN_MASK_CREATE don't make sense together */
	if (unlikely((mask & IN_MASK_ADD) && (mask & IN_MASK_CREATE))) {
		ret = -EINVAL;
		goto fput_and_out;
	}

	/* verify that this is indeed an inotify instance */
	if (unlikely(f.file->f_op != &inotify_fops)) {
		ret = -EINVAL;
		goto fput_and_out;
	}

	if (!(mask & IN_DONT_FOLLOW))
		flags |= LOOKUP_FOLLOW;
	if (mask & IN_ONLYDIR)
		flags |= LOOKUP_DIRECTORY;

	ret = inotify_find_inode(pathname, &path, flags);
	if (ret)
		goto fput_and_out;

	/* inode held in place by reference to path; group by fget on fd */
	inode = path.dentry->d_inode;
	group = f.file->private_data;

	/* create/update an inode mark */
	ret = inotify_update_watch(group, inode, mask);
	path_put(&path);
fput_and_out:
	fdput(f);
	return ret;
}

SYSCALL_DEFINE2(inotify_rm_watch, int, fd, __s32, wd)
{
	struct fsnotify_group *group;
	struct inotify_inode_mark *i_mark;
	struct fd f;
	int ret = 0;

	f = fdget(fd);
	if (unlikely(!f.file))
		return -EBADF;

	/* verify that this is indeed an inotify instance */
	ret = -EINVAL;
	if (unlikely(f.file->f_op != &inotify_fops))
		goto out;

	group = f.file->private_data;

	ret = -EINVAL;
	i_mark = inotify_idr_find(group, wd);
	if (unlikely(!i_mark))
		goto out;

	ret = 0;

	fsnotify_destroy_mark(&i_mark->fsn_mark, group);

	/* match ref taken by inotify_idr_find */
	fsnotify_put_mark(&i_mark->fsn_mark);

out:
	fdput(f);
	return ret;
}

/*
 * inotify_user_setup - Our initialization function.  Note that we cannot return
 * error because we have compiled-in VFS hooks.  So an (unlikely) failure here
 * must result in panic().
 */
static int __init inotify_user_setup(void)
{
	BUILD_BUG_ON(IN_ACCESS != FS_ACCESS);
	BUILD_BUG_ON(IN_MODIFY != FS_MODIFY);
	BUILD_BUG_ON(IN_ATTRIB != FS_ATTRIB);
	BUILD_BUG_ON(IN_CLOSE_WRITE != FS_CLOSE_WRITE);
	BUILD_BUG_ON(IN_CLOSE_NOWRITE != FS_CLOSE_NOWRITE);
	BUILD_BUG_ON(IN_OPEN != FS_OPEN);
	BUILD_BUG_ON(IN_MOVED_FROM != FS_MOVED_FROM);
	BUILD_BUG_ON(IN_MOVED_TO != FS_MOVED_TO);
	BUILD_BUG_ON(IN_CREATE != FS_CREATE);
	BUILD_BUG_ON(IN_DELETE != FS_DELETE);
	BUILD_BUG_ON(IN_DELETE_SELF != FS_DELETE_SELF);
	BUILD_BUG_ON(IN_MOVE_SELF != FS_MOVE_SELF);
	BUILD_BUG_ON(IN_UNMOUNT != FS_UNMOUNT);
	BUILD_BUG_ON(IN_Q_OVERFLOW != FS_Q_OVERFLOW);
	BUILD_BUG_ON(IN_IGNORED != FS_IN_IGNORED);
	BUILD_BUG_ON(IN_EXCL_UNLINK != FS_EXCL_UNLINK);
	BUILD_BUG_ON(IN_ISDIR != FS_ISDIR);
	BUILD_BUG_ON(IN_ONESHOT != FS_IN_ONESHOT);

	BUILD_BUG_ON(HWEIGHT32(ALL_INOTIFY_BITS) != 22);

	inotify_inode_mark_cachep = KMEM_CACHE(inotify_inode_mark,
					       SLAB_PANIC|SLAB_ACCOUNT);

	inotify_max_queued_events = 16384;
	init_user_ns.ucount_max[UCOUNT_INOTIFY_INSTANCES] = 128;
	init_user_ns.ucount_max[UCOUNT_INOTIFY_WATCHES] = 8192;

	return 0;
}
fs_initcall(inotify_user_setup);