/*
 * fs/inotify_user.c - inotify support for userspace
 *
 * Authors:
 *	John McCutchan	<ttb@tentacle.dhs.org>
 *	Robert Love	<rml@novell.com>
 *
 * Copyright (C) 2005 John McCutchan
 * Copyright 2006 Hewlett-Packard Development Company, L.P.
 *
 * Copyright (C) 2009 Eric Paris <Red Hat Inc>
 * inotify was largely rewritten to make use of the fsnotify infrastructure
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2, or (at your option) any
 * later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 */
#include <linux/file.h>
#include <linux/fs.h> /* struct inode */
#include <linux/fsnotify_backend.h>
#include <linux/idr.h>
#include <linux/init.h> /* module_init */
#include <linux/inotify.h>
#include <linux/kernel.h> /* roundup() */
#include <linux/namei.h> /* LOOKUP_FOLLOW */
#include <linux/sched.h> /* struct user */
#include <linux/slab.h> /* struct kmem_cache */
#include <linux/syscalls.h>
#include <linux/types.h>
#include <linux/anon_inodes.h>
#include <linux/uaccess.h>
#include <linux/poll.h>
#include <linux/wait.h>

#include "inotify.h"
#include "../fdinfo.h"

#include <asm/ioctls.h>
/* these are configurable via /proc/sys/fs/inotify/ */
static int inotify_max_user_instances __read_mostly;
static int inotify_max_queued_events __read_mostly;
static int inotify_max_user_watches __read_mostly;
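/*
 * inotify_inode_mark_cachep backs the per-watch inotify_inode_mark
 * structures; event_priv_cachep backs the per-event private data that
 * carries the watch descriptor back to userspace.
 */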
static struct kmem_cache *inotify_inode_mark_cachep __read_mostly;
struct kmem_cache *event_priv_cachep __read_mostly;
#ifdef CONFIG_SYSCTL

#include <linux/sysctl.h>

static int zero;

ctl_table inotify_table[] = {
	{
		.procname	= "max_user_instances",
		.data		= &inotify_max_user_instances,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_minmax,
		.extra1		= &zero,
	},
	{
		.procname	= "max_user_watches",
		.data		= &inotify_max_user_watches,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_minmax,
		.extra1		= &zero,
	},
	{
		.procname	= "max_queued_events",
		.data		= &inotify_max_queued_events,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_minmax,
		.extra1		= &zero
	},
	{ }
};
#endif /* CONFIG_SYSCTL */
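/*
 * The three limits above default to 16384 queued events, 128 instances and
 * 8192 watches per user; the defaults are assigned in inotify_user_setup()
 * at the bottom of this file.
 */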
static inline __u32 inotify_arg_to_mask(u32 arg)
{
	__u32 mask;

	/*
	 * Every mark should accept its own FS_IN_IGNORED, care about its
	 * children, and receive events when the inode is unmounted.
	 */
	mask = (FS_IN_IGNORED | FS_EVENT_ON_CHILD | FS_UNMOUNT);

	/* mask off the flags used to open the fd */
	mask |= (arg & (IN_ALL_EVENTS | IN_ONESHOT | IN_EXCL_UNLINK));

	return mask;
}
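/*
 * Convert an fsnotify mask back into the IN_* bits reported to userspace in
 * inotify_event->mask; kernel-internal bits are dropped.
 */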
static inline u32 inotify_mask_to_arg(__u32 mask)
{
	return mask & (IN_ALL_EVENTS | IN_ISDIR | IN_UNMOUNT | IN_IGNORED |
		       IN_Q_OVERFLOW);
}
/* inotify userspace file descriptor functions */
static unsigned int inotify_poll(struct file *file, poll_table *wait)
{
	struct fsnotify_group *group = file->private_data;
	int ret = 0;

	poll_wait(file, &group->notification_waitq, wait);
	mutex_lock(&group->notification_mutex);
	if (!fsnotify_notify_queue_is_empty(group))
		ret = POLLIN | POLLRDNORM;
	mutex_unlock(&group->notification_mutex);

	return ret;
}
/*
 * Get an inotify_kernel_event if one exists and is small enough to fit
 * in "count".  Return an error pointer if the event is not small enough.
 *
 * Called with the group->notification_mutex held.
 */
static struct fsnotify_event *get_one_event(struct fsnotify_group *group,
					    size_t count)
{
	size_t event_size = sizeof(struct inotify_event);
	struct fsnotify_event *event;

	if (fsnotify_notify_queue_is_empty(group))
		return NULL;

	event = fsnotify_peek_notify_event(group);

	pr_debug("%s: group=%p event=%p\n", __func__, group, event);

	if (event->name_len)
		event_size += roundup(event->name_len + 1, event_size);

	if (event_size > count)
		return ERR_PTR(-EINVAL);

	/* held the notification_mutex the whole time, so this is the
	 * same event we peeked above */
	fsnotify_remove_notify_event(group);

	return event;
}
/*
 * Copy an event to user space, returning how much we copied.
 *
 * We already checked that the event size is smaller than the
 * buffer we had in "get_one_event()" above.
 */
static ssize_t copy_event_to_user(struct fsnotify_group *group,
				  struct fsnotify_event *event,
				  char __user *buf)
{
	struct inotify_event inotify_event;
	struct fsnotify_event_private_data *fsn_priv;
	struct inotify_event_private_data *priv;
	size_t event_size = sizeof(struct inotify_event);
	size_t name_len = 0;

	pr_debug("%s: group=%p event=%p\n", __func__, group, event);

	/* we get the inotify watch descriptor from the event private data */
	spin_lock(&event->lock);
	fsn_priv = fsnotify_remove_priv_from_event(group, event);
	spin_unlock(&event->lock);

	if (!fsn_priv)
		inotify_event.wd = -1;
	else {
		priv = container_of(fsn_priv, struct inotify_event_private_data,
				    fsnotify_event_priv_data);
		inotify_event.wd = priv->wd;
		inotify_free_event_priv(fsn_priv);
	}

	/*
	 * round up event->name_len so it is a multiple of event_size,
	 * plus an extra byte for the terminating '\0'.
	 */
	if (event->name_len)
		name_len = roundup(event->name_len + 1, event_size);
	inotify_event.len = name_len;

	inotify_event.mask = inotify_mask_to_arg(event->mask);
	inotify_event.cookie = event->sync_cookie;

	/* send the main event */
	if (copy_to_user(buf, &inotify_event, event_size))
		return -EFAULT;

	buf += event_size;

	/*
	 * fsnotify only stores the pathname, so here we have to send the
	 * pathname and then pad it out to a multiple of
	 * sizeof(struct inotify_event) with zeros.
	 */
	if (name_len) {
		unsigned int len_to_zero = name_len - event->name_len;
		/* copy the path name */
		if (copy_to_user(buf, event->file_name, event->name_len))
			return -EFAULT;
		buf += event->name_len;

		/* fill userspace with 0's */
		if (clear_user(buf, len_to_zero))
			return -EFAULT;
		buf += len_to_zero;
		event_size += name_len;
	}

	return event_size;
}
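/*
 * A read() on an inotify fd therefore returns a stream of records, each a
 * struct inotify_event (wd, mask, cookie, len) immediately followed by len
 * bytes of NUL-padded name (len is 0 for events without a name).
 */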
static ssize_t inotify_read(struct file *file, char __user *buf,
			    size_t count, loff_t *pos)
{
	struct fsnotify_group *group;
	struct fsnotify_event *kevent;
	char __user *start;
	int ret;
	DEFINE_WAIT(wait);

	start = buf;
	group = file->private_data;

	while (1) {
		prepare_to_wait(&group->notification_waitq, &wait, TASK_INTERRUPTIBLE);

		mutex_lock(&group->notification_mutex);
		kevent = get_one_event(group, count);
		mutex_unlock(&group->notification_mutex);

		pr_debug("%s: group=%p kevent=%p\n", __func__, group, kevent);

		if (kevent) {
			ret = PTR_ERR(kevent);
			if (IS_ERR(kevent))
				break;
			ret = copy_event_to_user(group, kevent, buf);
			fsnotify_put_event(kevent);
			if (ret < 0)
				break;
			buf += ret;
			count -= ret;
			continue;
		}

		ret = -EAGAIN;
		if (file->f_flags & O_NONBLOCK)
			break;
		ret = -ERESTARTSYS;
		if (signal_pending(current))
			break;

		if (start != buf)
			break;

		schedule();
	}

	finish_wait(&group->notification_waitq, &wait);
	if (start != buf && ret != -EFAULT)
		ret = buf - start;
	return ret;
}
static int inotify_release(struct inode *ignored, struct file *file)
{
	struct fsnotify_group *group = file->private_data;

	pr_debug("%s: group=%p\n", __func__, group);

	/* free this group, matching get was inotify_init->fsnotify_obtain_group */
	fsnotify_destroy_group(group);

	return 0;
}
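/*
 * The only ioctl handled here is FIONREAD: walk the notification list and
 * report how many bytes the queued events would occupy in a read().
 */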
static long inotify_ioctl(struct file *file, unsigned int cmd,
			  unsigned long arg)
{
	struct fsnotify_group *group;
	struct fsnotify_event_holder *holder;
	struct fsnotify_event *event;
	void __user *p;
	int ret = -ENOTTY;
	size_t send_len = 0;

	group = file->private_data;
	p = (void __user *) arg;

	pr_debug("%s: group=%p cmd=%u\n", __func__, group, cmd);

	switch (cmd) {
	case FIONREAD:
		mutex_lock(&group->notification_mutex);
		list_for_each_entry(holder, &group->notification_list,
				    event_list) {
			event = holder->event;
			send_len += sizeof(struct inotify_event);
			if (event->name_len)
				send_len += roundup(event->name_len + 1,
						    sizeof(struct inotify_event));
		}
		mutex_unlock(&group->notification_mutex);
		ret = put_user(send_len, (int __user *) p);
		break;
	}

	return ret;
}
static const struct file_operations inotify_fops = {
	.show_fdinfo	= inotify_show_fdinfo,
	.poll		= inotify_poll,
	.read		= inotify_read,
	.fasync		= fsnotify_fasync,
	.release	= inotify_release,
	.unlocked_ioctl	= inotify_ioctl,
	.compat_ioctl	= inotify_ioctl,
	.llseek		= noop_llseek,
};
/*
 * inotify_find_inode - resolve a user-given path to a specific inode
 */
static int inotify_find_inode(const char __user *dirname, struct path *path, unsigned flags)
{
	int error;

	error = user_path_at(AT_FDCWD, dirname, flags, path);
	if (error)
		return error;
	/* you can only watch an inode if you have read permissions on it */
	error = inode_permission(path->dentry->d_inode, MAY_READ);
	if (error)
		path_put(path);
	return error;
}
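/*
 * Allocate the next watch descriptor (starting at 1 and cycling through the
 * idr) for this mark, and take a reference on the mark on behalf of the
 * idr's pointer to it.
 */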
static int inotify_add_to_idr(struct idr *idr, spinlock_t *idr_lock,
			      struct inotify_inode_mark *i_mark)
{
	int ret;

	idr_preload(GFP_KERNEL);
	spin_lock(idr_lock);

	ret = idr_alloc_cyclic(idr, i_mark, 1, 0, GFP_NOWAIT);
	if (ret >= 0) {
		/* we added the mark to the idr, take a reference */
		i_mark->wd = ret;
		fsnotify_get_mark(&i_mark->fsn_mark);
	}

	spin_unlock(idr_lock);
	idr_preload_end();
	return ret < 0 ? ret : 0;
}
static struct inotify_inode_mark *inotify_idr_find_locked(struct fsnotify_group *group,
							   int wd)
{
	struct idr *idr = &group->inotify_data.idr;
	spinlock_t *idr_lock = &group->inotify_data.idr_lock;
	struct inotify_inode_mark *i_mark;

	assert_spin_locked(idr_lock);

	i_mark = idr_find(idr, wd);
	if (i_mark) {
		struct fsnotify_mark *fsn_mark = &i_mark->fsn_mark;

		fsnotify_get_mark(fsn_mark);
		/* One ref for being in the idr, one ref we just took */
		BUG_ON(atomic_read(&fsn_mark->refcnt) < 2);
	}

	return i_mark;
}
static struct inotify_inode_mark *inotify_idr_find(struct fsnotify_group *group,
						   int wd)
{
	struct inotify_inode_mark *i_mark;
	spinlock_t *idr_lock = &group->inotify_data.idr_lock;

	spin_lock(idr_lock);
	i_mark = inotify_idr_find_locked(group, wd);
	spin_unlock(idr_lock);

	return i_mark;
}
static void do_inotify_remove_from_idr(struct fsnotify_group *group,
				       struct inotify_inode_mark *i_mark)
{
	struct idr *idr = &group->inotify_data.idr;
	spinlock_t *idr_lock = &group->inotify_data.idr_lock;
	int wd = i_mark->wd;

	assert_spin_locked(idr_lock);

	idr_remove(idr, wd);

	/* removed from the idr, drop that ref */
	fsnotify_put_mark(&i_mark->fsn_mark);
}
/*
 * Remove the mark from the idr (if present) and drop the reference
 * on the mark because it was in the idr.
 */
static void inotify_remove_from_idr(struct fsnotify_group *group,
				    struct inotify_inode_mark *i_mark)
{
	spinlock_t *idr_lock = &group->inotify_data.idr_lock;
	struct inotify_inode_mark *found_i_mark = NULL;
	int wd;

	spin_lock(idr_lock);
	wd = i_mark->wd;

	/*
	 * does this i_mark think it is in the idr?  we shouldn't get called
	 * unless it thought it was in the idr.
	 */
	if (wd == -1) {
		WARN_ONCE(1, "%s: i_mark=%p i_mark->wd=%d i_mark->group=%p"
			" i_mark->inode=%p\n", __func__, i_mark, i_mark->wd,
			i_mark->fsn_mark.group, i_mark->fsn_mark.i.inode);
		goto out;
	}

	/* Let's look in the idr to see if we find it */
	found_i_mark = inotify_idr_find_locked(group, wd);
	if (unlikely(!found_i_mark)) {
		WARN_ONCE(1, "%s: i_mark=%p i_mark->wd=%d i_mark->group=%p"
			" i_mark->inode=%p\n", __func__, i_mark, i_mark->wd,
			i_mark->fsn_mark.group, i_mark->fsn_mark.i.inode);
		goto out;
	}

	/*
	 * We found a mark in the idr at the right wd, but it's
	 * not the mark we were told to remove.  eparis seriously
	 * fucked up somewhere.
	 */
	if (unlikely(found_i_mark != i_mark)) {
		WARN_ONCE(1, "%s: i_mark=%p i_mark->wd=%d i_mark->group=%p "
			"mark->inode=%p found_i_mark=%p found_i_mark->wd=%d "
			"found_i_mark->group=%p found_i_mark->inode=%p\n",
			__func__, i_mark, i_mark->wd, i_mark->fsn_mark.group,
			i_mark->fsn_mark.i.inode, found_i_mark, found_i_mark->wd,
			found_i_mark->fsn_mark.group,
			found_i_mark->fsn_mark.i.inode);
		goto out;
	}

	/*
	 * One ref for being in the idr,
	 * one ref held by the caller trying to kill us,
	 * one ref grabbed by inotify_idr_find.
	 */
	if (unlikely(atomic_read(&i_mark->fsn_mark.refcnt) < 3)) {
		printk(KERN_ERR "%s: i_mark=%p i_mark->wd=%d i_mark->group=%p"
			" i_mark->inode=%p\n", __func__, i_mark, i_mark->wd,
			i_mark->fsn_mark.group, i_mark->fsn_mark.i.inode);
		/* we can't really recover with bad ref counting.. */
		BUG();
	}

	do_inotify_remove_from_idr(group, i_mark);
out:
	/* match the ref taken by inotify_idr_find_locked() */
	if (found_i_mark)
		fsnotify_put_mark(&found_i_mark->fsn_mark);
	i_mark->wd = -1;
	spin_unlock(idr_lock);
}
/*
 * Send IN_IGNORED for this wd, remove this wd from the idr.
 */
void inotify_ignored_and_remove_idr(struct fsnotify_mark *fsn_mark,
				    struct fsnotify_group *group)
{
	struct inotify_inode_mark *i_mark;
	struct fsnotify_event *ignored_event, *notify_event;
	struct inotify_event_private_data *event_priv;
	struct fsnotify_event_private_data *fsn_event_priv;
	int ret;

	i_mark = container_of(fsn_mark, struct inotify_inode_mark, fsn_mark);

	ignored_event = fsnotify_create_event(NULL, FS_IN_IGNORED, NULL,
					      FSNOTIFY_EVENT_NONE, NULL, 0,
					      GFP_NOFS);
	if (!ignored_event)
		goto skip_send_ignore;

	event_priv = kmem_cache_alloc(event_priv_cachep, GFP_NOFS);
	if (unlikely(!event_priv))
		goto skip_send_ignore;

	fsn_event_priv = &event_priv->fsnotify_event_priv_data;

	fsnotify_get_group(group);
	fsn_event_priv->group = group;
	event_priv->wd = i_mark->wd;

	notify_event = fsnotify_add_notify_event(group, ignored_event,
						 fsn_event_priv, NULL);
	if (notify_event) {
		if (IS_ERR(notify_event))
			ret = PTR_ERR(notify_event);
		else
			fsnotify_put_event(notify_event);
		inotify_free_event_priv(fsn_event_priv);
	}

skip_send_ignore:
	/* matches the reference taken when the event was created */
	if (ignored_event)
		fsnotify_put_event(ignored_event);

	/* remove this mark from the idr */
	inotify_remove_from_idr(group, i_mark);

	atomic_dec(&group->inotify_data.user->inotify_watches);
}
/* ding dong the mark is dead */
static void inotify_free_mark(struct fsnotify_mark *fsn_mark)
{
	struct inotify_inode_mark *i_mark;

	i_mark = container_of(fsn_mark, struct inotify_inode_mark, fsn_mark);

	kmem_cache_free(inotify_inode_mark_cachep, i_mark);
}
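/*
 * Update the mask of an existing watch on this inode.  With IN_MASK_ADD the
 * new bits are OR'd into the current mask; otherwise the mask is replaced.
 * Fails if no mark for this group exists on the inode yet, in which case the
 * caller falls back to inotify_new_watch().
 */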
static int inotify_update_existing_watch(struct fsnotify_group *group,
					 struct inode *inode,
					 u32 arg)
{
	struct fsnotify_mark *fsn_mark;
	struct inotify_inode_mark *i_mark;
	__u32 old_mask, new_mask;
	__u32 mask;
	int add = (arg & IN_MASK_ADD);
	int ret;

	mask = inotify_arg_to_mask(arg);

	fsn_mark = fsnotify_find_inode_mark(group, inode);
	if (!fsn_mark)
		return -ENOENT;

	i_mark = container_of(fsn_mark, struct inotify_inode_mark, fsn_mark);

	spin_lock(&fsn_mark->lock);

	old_mask = fsn_mark->mask;
	if (add)
		fsnotify_set_mark_mask_locked(fsn_mark, (fsn_mark->mask | mask));
	else
		fsnotify_set_mark_mask_locked(fsn_mark, mask);
	new_mask = fsn_mark->mask;

	spin_unlock(&fsn_mark->lock);

	if (old_mask != new_mask) {
		/* more bits in old than in new? */
		int dropped = (old_mask & ~new_mask);
		/* more bits in this fsn_mark than the inode's mask? */
		int do_inode = (new_mask & ~inode->i_fsnotify_mask);

		/* update the inode with this new fsn_mark */
		if (dropped || do_inode)
			fsnotify_recalc_inode_mask(inode);
	}

	/* return the wd */
	ret = i_mark->wd;

	/* match the get from fsnotify_find_mark() */
	fsnotify_put_mark(fsn_mark);

	return ret;
}
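/*
 * Create a brand new watch: allocate a mark, assign it a watch descriptor in
 * the group's idr, and attach it to the inode.  Called with
 * group->mark_mutex held (see inotify_update_watch()).
 */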
static int inotify_new_watch(struct fsnotify_group *group,
			     struct inode *inode,
			     u32 arg)
{
	struct inotify_inode_mark *tmp_i_mark;
	__u32 mask;
	int ret;
	struct idr *idr = &group->inotify_data.idr;
	spinlock_t *idr_lock = &group->inotify_data.idr_lock;

	mask = inotify_arg_to_mask(arg);

	tmp_i_mark = kmem_cache_alloc(inotify_inode_mark_cachep, GFP_KERNEL);
	if (unlikely(!tmp_i_mark))
		return -ENOMEM;

	fsnotify_init_mark(&tmp_i_mark->fsn_mark, inotify_free_mark);
	tmp_i_mark->fsn_mark.mask = mask;
	tmp_i_mark->wd = -1;

	ret = -ENOSPC;
	if (atomic_read(&group->inotify_data.user->inotify_watches) >= inotify_max_user_watches)
		goto out_err;

	ret = inotify_add_to_idr(idr, idr_lock, tmp_i_mark);
	if (ret)
		goto out_err;

	/* we are on the idr, now get on the inode */
	ret = fsnotify_add_mark_locked(&tmp_i_mark->fsn_mark, group, inode,
				       NULL, 0);
	if (ret) {
		/* we failed to get on the inode, get off the idr */
		inotify_remove_from_idr(group, tmp_i_mark);
		goto out_err;
	}

	/* increment the number of watches the user has */
	atomic_inc(&group->inotify_data.user->inotify_watches);

	/* return the watch descriptor for this new mark */
	ret = tmp_i_mark->wd;

out_err:
	/* match the ref from fsnotify_init_mark() */
	fsnotify_put_mark(&tmp_i_mark->fsn_mark);

	return ret;
}
static int inotify_update_watch(struct fsnotify_group *group, struct inode *inode, u32 arg)
{
	int ret = 0;

	mutex_lock(&group->mark_mutex);
	/* try to update an existing watch with the new arg */
	ret = inotify_update_existing_watch(group, inode, arg);
	/* no mark present, try to add a new one */
	if (ret == -ENOENT)
		ret = inotify_new_watch(group, inode, arg);
	mutex_unlock(&group->mark_mutex);

	return ret;
}
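/*
 * Allocate a new fsnotify group for an inotify instance and charge it
 * against the current user's instance count; fails with -EMFILE once the
 * user already has inotify_max_user_instances instances open.
 */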
static struct fsnotify_group *inotify_new_group(unsigned int max_events)
{
	struct fsnotify_group *group;

	group = fsnotify_alloc_group(&inotify_fsnotify_ops);
	if (IS_ERR(group))
		return group;

	group->max_events = max_events;

	spin_lock_init(&group->inotify_data.idr_lock);
	idr_init(&group->inotify_data.idr);
	group->inotify_data.user = get_current_user();

	if (atomic_inc_return(&group->inotify_data.user->inotify_devs) >
	    inotify_max_user_instances) {
		fsnotify_destroy_group(group);
		return ERR_PTR(-EMFILE);
	}

	return group;
}
/* inotify syscalls */
SYSCALL_DEFINE1(inotify_init1, int, flags)
{
	struct fsnotify_group *group;
	int ret;

	/* Check the IN_* constants for consistency. */
	BUILD_BUG_ON(IN_CLOEXEC != O_CLOEXEC);
	BUILD_BUG_ON(IN_NONBLOCK != O_NONBLOCK);

	if (flags & ~(IN_CLOEXEC | IN_NONBLOCK))
		return -EINVAL;

	/* fsnotify_obtain_group took a reference to group, we put this when we kill the file in the end */
	group = inotify_new_group(inotify_max_queued_events);
	if (IS_ERR(group))
		return PTR_ERR(group);

	ret = anon_inode_getfd("inotify", &inotify_fops, group,
			       O_RDONLY | flags);
	if (ret < 0)
		fsnotify_destroy_group(group);

	return ret;
}

SYSCALL_DEFINE0(inotify_init)
{
	return sys_inotify_init1(0);
}
SYSCALL_DEFINE3(inotify_add_watch, int, fd, const char __user *, pathname,
		u32, mask)
{
	struct fsnotify_group *group;
	struct inode *inode;
	struct path path;
	struct fd f;
	int ret;
	unsigned flags = 0;

	/* don't allow invalid bits: we don't want flags set */
	if (unlikely(!(mask & ALL_INOTIFY_BITS)))
		return -EINVAL;

	f = fdget(fd);
	if (unlikely(!f.file))
		return -EBADF;

	/* verify that this is indeed an inotify instance */
	if (unlikely(f.file->f_op != &inotify_fops)) {
		ret = -EINVAL;
		goto fput_and_out;
	}

	if (!(mask & IN_DONT_FOLLOW))
		flags |= LOOKUP_FOLLOW;
	if (mask & IN_ONLYDIR)
		flags |= LOOKUP_DIRECTORY;

	ret = inotify_find_inode(pathname, &path, flags);
	if (ret)
		goto fput_and_out;

	/* inode held in place by reference to path; group by fget on fd */
	inode = path.dentry->d_inode;
	group = f.file->private_data;

	/* create/update an inode mark */
	ret = inotify_update_watch(group, inode, mask);
	path_put(&path);
fput_and_out:
	fdput(f);
	return ret;
}
SYSCALL_DEFINE2(inotify_rm_watch, int, fd, __s32, wd)
{
	struct fsnotify_group *group;
	struct inotify_inode_mark *i_mark;
	struct fd f;
	int ret = 0;

	f = fdget(fd);
	if (unlikely(!f.file))
		return -EBADF;

	/* verify that this is indeed an inotify instance */
	ret = -EINVAL;
	if (unlikely(f.file->f_op != &inotify_fops))
		goto out;

	group = f.file->private_data;

	ret = -EINVAL;
	i_mark = inotify_idr_find(group, wd);
	if (unlikely(!i_mark))
		goto out;

	ret = 0;

	fsnotify_destroy_mark(&i_mark->fsn_mark, group);

	/* match ref taken by inotify_idr_find */
	fsnotify_put_mark(&i_mark->fsn_mark);

out:
	fdput(f);
	return ret;
}
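/*
 * Illustrative userspace usage of the three syscalls above (not part of this
 * file; the watched path is just an example and error handling is omitted):
 *
 *	int fd = inotify_init1(IN_NONBLOCK | IN_CLOEXEC);
 *	int wd = inotify_add_watch(fd, "/tmp", IN_CREATE | IN_DELETE);
 *	char buf[4096];
 *	ssize_t n = read(fd, buf, sizeof(buf));
 *	// buf now holds struct inotify_event records; each record's name
 *	// field is event->len bytes of NUL-padded pathname
 *	inotify_rm_watch(fd, wd);
 *	close(fd);
 */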
/*
 * inotify_user_setup - Our initialization function.  Note that we cannot return
 * error because we have compiled-in VFS hooks.  So an (unlikely) failure here
 * must result in panic().
 */
static int __init inotify_user_setup(void)
{
	BUILD_BUG_ON(IN_ACCESS != FS_ACCESS);
	BUILD_BUG_ON(IN_MODIFY != FS_MODIFY);
	BUILD_BUG_ON(IN_ATTRIB != FS_ATTRIB);
	BUILD_BUG_ON(IN_CLOSE_WRITE != FS_CLOSE_WRITE);
	BUILD_BUG_ON(IN_CLOSE_NOWRITE != FS_CLOSE_NOWRITE);
	BUILD_BUG_ON(IN_OPEN != FS_OPEN);
	BUILD_BUG_ON(IN_MOVED_FROM != FS_MOVED_FROM);
	BUILD_BUG_ON(IN_MOVED_TO != FS_MOVED_TO);
	BUILD_BUG_ON(IN_CREATE != FS_CREATE);
	BUILD_BUG_ON(IN_DELETE != FS_DELETE);
	BUILD_BUG_ON(IN_DELETE_SELF != FS_DELETE_SELF);
	BUILD_BUG_ON(IN_MOVE_SELF != FS_MOVE_SELF);
	BUILD_BUG_ON(IN_UNMOUNT != FS_UNMOUNT);
	BUILD_BUG_ON(IN_Q_OVERFLOW != FS_Q_OVERFLOW);
	BUILD_BUG_ON(IN_IGNORED != FS_IN_IGNORED);
	BUILD_BUG_ON(IN_EXCL_UNLINK != FS_EXCL_UNLINK);
	BUILD_BUG_ON(IN_ISDIR != FS_ISDIR);
	BUILD_BUG_ON(IN_ONESHOT != FS_IN_ONESHOT);

	BUG_ON(hweight32(ALL_INOTIFY_BITS) != 21);

	inotify_inode_mark_cachep = KMEM_CACHE(inotify_inode_mark, SLAB_PANIC);
	event_priv_cachep = KMEM_CACHE(inotify_event_private_data, SLAB_PANIC);

	inotify_max_queued_events = 16384;
	inotify_max_user_instances = 128;
	inotify_max_user_watches = 8192;

	return 0;
}
module_init(inotify_user_setup);
);