/*
 * fs/inotify_user.c - inotify support for userspace
 *
 * Authors:
 *	John McCutchan	<ttb@tentacle.dhs.org>
 *	Robert Love	<rml@novell.com>
 *
 * Copyright (C) 2005 John McCutchan
 * Copyright 2006 Hewlett-Packard Development Company, L.P.
 *
 * Copyright (C) 2009 Eric Paris <Red Hat Inc>
 * inotify was largely rewritten to make use of the fsnotify infrastructure
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2, or (at your option) any
 * later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 */
#include <linux/file.h>
#include <linux/fs.h> /* struct inode */
#include <linux/fsnotify_backend.h>
#include <linux/idr.h>
#include <linux/init.h> /* module_init */
#include <linux/inotify.h>
#include <linux/kernel.h> /* roundup() */
#include <linux/magic.h> /* superblock magic number */
#include <linux/mount.h> /* mntget */
#include <linux/namei.h> /* LOOKUP_FOLLOW */
#include <linux/path.h> /* struct path */
#include <linux/sched.h> /* struct user */
#include <linux/slab.h> /* struct kmem_cache */
#include <linux/syscalls.h>
#include <linux/types.h>
#include <linux/uaccess.h>
#include <linux/poll.h>
#include <linux/wait.h>

#include <asm/ioctls.h>
static struct vfsmount *inotify_mnt __read_mostly;

/* these are configurable via /proc/sys/fs/inotify/ */
static int inotify_max_user_instances __read_mostly;
static int inotify_max_queued_events __read_mostly;
int inotify_max_user_watches __read_mostly;

static struct kmem_cache *inotify_inode_mark_cachep __read_mostly;
struct kmem_cache *event_priv_cachep __read_mostly;
#ifdef CONFIG_SYSCTL

#include <linux/sysctl.h>

static int zero;

ctl_table inotify_table[] = {
	{
		.ctl_name	= INOTIFY_MAX_USER_INSTANCES,
		.procname	= "max_user_instances",
		.data		= &inotify_max_user_instances,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= &proc_dointvec_minmax,
		.strategy	= &sysctl_intvec,
		.extra1		= &zero,
	},
	{
		.ctl_name	= INOTIFY_MAX_USER_WATCHES,
		.procname	= "max_user_watches",
		.data		= &inotify_max_user_watches,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= &proc_dointvec_minmax,
		.strategy	= &sysctl_intvec,
		.extra1		= &zero,
	},
	{
		.ctl_name	= INOTIFY_MAX_QUEUED_EVENTS,
		.procname	= "max_queued_events",
		.data		= &inotify_max_queued_events,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= &proc_dointvec_minmax,
		.strategy	= &sysctl_intvec,
		.extra1		= &zero
	},
	{ .ctl_name = 0 }
};
#endif /* CONFIG_SYSCTL */
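/*
 * Illustrative sketch: the table above exposes three plain integer
 * sysctls, so from userspace the limits can be inspected and tuned
 * like so:
 *
 *	cat /proc/sys/fs/inotify/max_user_watches
 *	echo 16384 > /proc/sys/fs/inotify/max_user_watches
 *
 * proc_dointvec_minmax together with .extra1 = &zero rejects attempts
 * to set a negative limit.
 */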
static inline __u32 inotify_arg_to_mask(u32 arg)
{
	__u32 mask;

	/* every mark should accept its own FS_IN_IGNORED and care about children */
	mask = (FS_IN_IGNORED | FS_EVENT_ON_CHILD);

	/* mask off the flags used to open the fd */
	mask |= (arg & (IN_ALL_EVENTS | IN_ONESHOT));

	return mask;
}
static inline u32 inotify_mask_to_arg(__u32 mask)
{
	return mask & (IN_ALL_EVENTS | IN_ISDIR | IN_UNMOUNT | IN_IGNORED |
		       IN_Q_OVERFLOW);
}
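/*
 * Round-trip sketch of the two helpers above: for arg == IN_MODIFY,
 * inotify_arg_to_mask() hands FS_IN_IGNORED | FS_EVENT_ON_CHILD |
 * IN_MODIFY to the fsnotify layer, and inotify_mask_to_arg() later
 * strips the kernel-internal bits again so userspace sees IN_MODIFY
 * plus at most IN_ISDIR, IN_UNMOUNT, IN_IGNORED or IN_Q_OVERFLOW.
 */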
/* inotify userspace file descriptor functions */
static unsigned int inotify_poll(struct file *file, poll_table *wait)
{
	struct fsnotify_group *group = file->private_data;
	int ret = 0;

	poll_wait(file, &group->notification_waitq, wait);
	mutex_lock(&group->notification_mutex);
	if (!fsnotify_notify_queue_is_empty(group))
		ret = POLLIN | POLLRDNORM;
	mutex_unlock(&group->notification_mutex);

	return ret;
}
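/*
 * Illustrative userspace sketch of the path that lands in inotify_poll()
 * (assumes inotify_fd came from inotify_init()):
 *
 *	struct pollfd pfd = { .fd = inotify_fd, .events = POLLIN };
 *	if (poll(&pfd, 1, -1) > 0 && (pfd.revents & POLLIN))
 *		;	// at least one event is queued, read() won't block
 */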
/*
 * Get an inotify_kernel_event if one exists and is small
 * enough to fit in "count". Return an error pointer if
 * the buffer is not large enough.
 *
 * Called with the group->notification_mutex held.
 */
static struct fsnotify_event *get_one_event(struct fsnotify_group *group,
					    size_t count)
{
	size_t event_size = sizeof(struct inotify_event);
	struct fsnotify_event *event;

	if (fsnotify_notify_queue_is_empty(group))
		return NULL;

	event = fsnotify_peek_notify_event(group);

	if (event->name_len)
		event_size += roundup(event->name_len + 1, event_size);

	if (event_size > count)
		return ERR_PTR(-EINVAL);

	/* held the notification_mutex the whole time, so this is the
	 * same event we peeked above */
	fsnotify_remove_notify_event(group);

	return event;
}
/*
 * Copy an event to user space, returning how much we copied.
 *
 * We already checked that the event size is smaller than the
 * buffer we had in "get_one_event()" above.
 */
static ssize_t copy_event_to_user(struct fsnotify_group *group,
				  struct fsnotify_event *event,
				  char __user *buf)
{
	struct inotify_event inotify_event;
	struct fsnotify_event_private_data *fsn_priv;
	struct inotify_event_private_data *priv;
	size_t event_size = sizeof(struct inotify_event);
	size_t name_len = 0;

	/* we get the inotify watch descriptor from the event private data */
	spin_lock(&event->lock);
	fsn_priv = fsnotify_remove_priv_from_event(group, event);
	spin_unlock(&event->lock);

	if (!fsn_priv)
		inotify_event.wd = -1;
	else {
		priv = container_of(fsn_priv, struct inotify_event_private_data,
				    fsnotify_event_priv_data);
		inotify_event.wd = priv->wd;
		inotify_free_event_priv(fsn_priv);
	}

	/*
	 * round up event->name_len so it is a multiple of event_size
	 * plus an extra byte for the terminating '\0'.
	 */
	if (event->name_len)
		name_len = roundup(event->name_len + 1, event_size);
	inotify_event.len = name_len;

	inotify_event.mask = inotify_mask_to_arg(event->mask);
	inotify_event.cookie = event->sync_cookie;

	/* send the main event */
	if (copy_to_user(buf, &inotify_event, event_size))
		return -EFAULT;

	buf += event_size;

	/*
	 * fsnotify only stores the pathname, so here we have to send the pathname
	 * and then pad that pathname out to a multiple of sizeof(inotify_event)
	 * with zeros; clear_user() below provides the zero padding.
	 */
	if (name_len) {
		unsigned int len_to_zero = name_len - event->name_len;
		/* copy the path name */
		if (copy_to_user(buf, event->file_name, event->name_len))
			return -EFAULT;
		buf += event->name_len;

		/* fill userspace with 0's */
		if (clear_user(buf, len_to_zero))
			return -EFAULT;
		buf += len_to_zero;
		event_size += name_len;
	}

	return event_size;
}
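/*
 * Resulting layout for one event with a name, as produced above (sizes
 * shown assuming sizeof(struct inotify_event) == 16):
 *
 *	[ wd | mask | cookie | len=16 ]  <- the fixed-size header
 *	[ "foo.txt\0" + 8 more '\0's  ]  <- name, NUL-padded out to len
 *
 * Because len is always a multiple of the header size here, userspace
 * can step through a read() buffer with
 * "ptr += sizeof(struct inotify_event) + event->len".
 */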
static ssize_t inotify_read(struct file *file, char __user *buf,
			    size_t count, loff_t *pos)
{
	struct fsnotify_group *group;
	struct fsnotify_event *kevent;
	char __user *start;
	int ret;
	DEFINE_WAIT(wait);

	start = buf;
	group = file->private_data;

	while (1) {
		prepare_to_wait(&group->notification_waitq, &wait, TASK_INTERRUPTIBLE);

		mutex_lock(&group->notification_mutex);
		kevent = get_one_event(group, count);
		mutex_unlock(&group->notification_mutex);

		if (kevent) {
			ret = PTR_ERR(kevent);
			if (IS_ERR(kevent))
				break;
			ret = copy_event_to_user(group, kevent, buf);
			fsnotify_put_event(kevent);
			if (ret < 0)
				break;
			buf += ret;
			count -= ret;
			continue;
		}

		ret = -EAGAIN;
		if (file->f_flags & O_NONBLOCK)
			break;
		ret = -EINTR;
		if (signal_pending(current))
			break;

		if (start != buf)
			break;

		schedule();
	}

	finish_wait(&group->notification_waitq, &wait);
	if (start != buf && ret != -EFAULT)
		ret = buf - start;
	return ret;
}
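/*
 * Illustrative userspace consumer of inotify_read() (handle() is a
 * hypothetical callback, not part of the API):
 *
 *	char buf[4096];
 *	ssize_t len = read(inotify_fd, buf, sizeof(buf));
 *	char *p = buf;
 *	while (p < buf + len) {
 *		struct inotify_event *ev = (struct inotify_event *)p;
 *		handle(ev->wd, ev->mask, ev->len ? ev->name : NULL);
 *		p += sizeof(struct inotify_event) + ev->len;
 *	}
 *
 * Note that get_one_event() returns -EINVAL if the next event does not
 * fit, so the buffer must hold at least one maximally sized event.
 */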
static int inotify_fasync(int fd, struct file *file, int on)
{
	struct fsnotify_group *group = file->private_data;

	return fasync_helper(fd, file, on, &group->inotify_data.fa) >= 0 ? 0 : -EIO;
}
static int inotify_release(struct inode *ignored, struct file *file)
{
	struct fsnotify_group *group = file->private_data;
	struct user_struct *user = group->inotify_data.user;

	fsnotify_clear_marks_by_group(group);

	/* free this group, matching get was inotify_init->fsnotify_obtain_group */
	fsnotify_put_group(group);

	atomic_dec(&user->inotify_devs);

	return 0;
}
static long inotify_ioctl(struct file *file, unsigned int cmd,
			  unsigned long arg)
{
	struct fsnotify_group *group;
	struct fsnotify_event_holder *holder;
	struct fsnotify_event *event;
	void __user *p;
	int ret = -ENOTTY;
	size_t send_len = 0;

	group = file->private_data;
	p = (void __user *) arg;

	switch (cmd) {
	case FIONREAD:
		mutex_lock(&group->notification_mutex);
		list_for_each_entry(holder, &group->notification_list, event_list) {
			event = holder->event;
			send_len += sizeof(struct inotify_event);
			if (event->name_len)
				send_len += roundup(event->name_len + 1,
						sizeof(struct inotify_event));
		}
		mutex_unlock(&group->notification_mutex);
		ret = put_user(send_len, (int __user *) p);
		break;
	}

	return ret;
}
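/*
 * Illustrative userspace use of the FIONREAD case above, which reports
 * how many bytes the currently queued events would occupy:
 *
 *	int avail = 0;
 *	ioctl(inotify_fd, FIONREAD, &avail);
 */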
static const struct file_operations inotify_fops = {
	.poll		= inotify_poll,
	.read		= inotify_read,
	.fasync		= inotify_fasync,
	.release	= inotify_release,
	.unlocked_ioctl	= inotify_ioctl,
	.compat_ioctl	= inotify_ioctl,
};
/*
 * find_inode - resolve a user-given path to a specific inode
 */
static int inotify_find_inode(const char __user *dirname, struct path *path, unsigned flags)
{
	int error;

	error = user_path_at(AT_FDCWD, dirname, flags, path);
	if (error)
		return error;
	/* you can only watch an inode if you have read permissions on it */
	error = inode_permission(path->dentry->d_inode, MAY_READ);
	if (error)
		path_put(path);
	return error;
}
static int inotify_add_to_idr(struct idr *idr, spinlock_t *idr_lock,
			      int last_wd,
			      struct inotify_inode_mark_entry *ientry)
{
	int ret;

	do {
		if (unlikely(!idr_pre_get(idr, GFP_KERNEL)))
			return -ENOMEM;

		spin_lock(idr_lock);
		ret = idr_get_new_above(idr, ientry, last_wd + 1,
					&ientry->wd);
		/* we added the mark to the idr, take a reference */
		if (!ret)
			fsnotify_get_mark(&ientry->fsn_entry);
		spin_unlock(idr_lock);
	} while (ret == -EAGAIN);

	return ret;
}
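/*
 * The loop above is the classic two-step idr idiom: idr_pre_get()
 * preallocates memory outside the spinlock, idr_get_new_above() consumes
 * the preallocation under the lock and fails with -EAGAIN if another CPU
 * raced us to it, in which case we simply preallocate and retry.
 */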
static struct inotify_inode_mark_entry *inotify_idr_find_locked(struct fsnotify_group *group,
								int wd)
{
	struct idr *idr = &group->inotify_data.idr;
	spinlock_t *idr_lock = &group->inotify_data.idr_lock;
	struct inotify_inode_mark_entry *ientry;

	assert_spin_locked(idr_lock);

	ientry = idr_find(idr, wd);
	if (ientry) {
		struct fsnotify_mark_entry *fsn_entry = &ientry->fsn_entry;

		fsnotify_get_mark(fsn_entry);
		/* One ref for being in the idr, one ref we just took */
		BUG_ON(atomic_read(&fsn_entry->refcnt) < 2);
	}

	return ientry;
}
static struct inotify_inode_mark_entry *inotify_idr_find(struct fsnotify_group *group,
							 int wd)
{
	struct inotify_inode_mark_entry *ientry;
	spinlock_t *idr_lock = &group->inotify_data.idr_lock;

	spin_lock(idr_lock);
	ientry = inotify_idr_find_locked(group, wd);
	spin_unlock(idr_lock);

	return ientry;
}
static void do_inotify_remove_from_idr(struct fsnotify_group *group,
				       struct inotify_inode_mark_entry *ientry)
{
	struct idr *idr = &group->inotify_data.idr;
	spinlock_t *idr_lock = &group->inotify_data.idr_lock;
	int wd = ientry->wd;

	assert_spin_locked(idr_lock);

	idr_remove(idr, wd);

	/* removed from the idr, drop that ref */
	fsnotify_put_mark(&ientry->fsn_entry);
}
/*
 * Remove the mark from the idr (if present) and drop the reference
 * on the mark because it was in the idr.
 */
static void inotify_remove_from_idr(struct fsnotify_group *group,
				    struct inotify_inode_mark_entry *ientry)
{
	spinlock_t *idr_lock = &group->inotify_data.idr_lock;
	struct inotify_inode_mark_entry *found_ientry = NULL;
	int wd;

	spin_lock(idr_lock);
	wd = ientry->wd;

	/*
	 * does this ientry think it is in the idr?  we shouldn't get called
	 * unless it is.
	 */
	if (wd == -1) {
		printk(KERN_WARNING "%s: ientry=%p ientry->wd=%d ientry->group=%p"
			" ientry->inode=%p\n", __func__, ientry, ientry->wd,
			ientry->fsn_entry.group, ientry->fsn_entry.inode);
		WARN_ON(1);
		goto out;
	}

	/* Lets look in the idr to see if we find it */
	found_ientry = inotify_idr_find_locked(group, wd);
	if (unlikely(!found_ientry)) {
		printk(KERN_WARNING "%s: ientry=%p ientry->wd=%d ientry->group=%p"
			" ientry->inode=%p\n", __func__, ientry, ientry->wd,
			ientry->fsn_entry.group, ientry->fsn_entry.inode);
		WARN_ON(1);
		goto out;
	}

	/*
	 * We found an entry in the idr at the right wd, but it's
	 * not the entry we were told to remove.  Something is seriously
	 * broken if that ever happens.
	 */
	if (unlikely(found_ientry != ientry)) {
		WARN_ON(1);
		printk(KERN_WARNING "%s: ientry=%p ientry->wd=%d ientry->group=%p "
			"entry->inode=%p found_ientry=%p found_ientry->wd=%d "
			"found_ientry->group=%p found_ientry->inode=%p\n",
			__func__, ientry, ientry->wd, ientry->fsn_entry.group,
			ientry->fsn_entry.inode, found_ientry, found_ientry->wd,
			found_ientry->fsn_entry.group,
			found_ientry->fsn_entry.inode);
		goto out;
	}

	/*
	 * One ref for being in the idr
	 * one ref held by the caller trying to kill us
	 * one ref grabbed by inotify_idr_find
	 */
	if (unlikely(atomic_read(&ientry->fsn_entry.refcnt) < 3)) {
		printk(KERN_WARNING "%s: ientry=%p ientry->wd=%d ientry->group=%p"
			" ientry->inode=%p\n", __func__, ientry, ientry->wd,
			ientry->fsn_entry.group, ientry->fsn_entry.inode);
		/* we can't really recover with bad ref cnting.. */
		BUG();
	}

	do_inotify_remove_from_idr(group, ientry);
	ientry->wd = -1;
out:
	/* match the ref taken by inotify_idr_find_locked() */
	if (found_ientry)
		fsnotify_put_mark(&found_ientry->fsn_entry);
	spin_unlock(idr_lock);
}
/*
 * Send IN_IGNORED for this wd, remove this wd from the idr.
 */
void inotify_ignored_and_remove_idr(struct fsnotify_mark_entry *entry,
				    struct fsnotify_group *group)
{
	struct inotify_inode_mark_entry *ientry;
	struct fsnotify_event *ignored_event;
	struct inotify_event_private_data *event_priv;
	struct fsnotify_event_private_data *fsn_event_priv;
	int ret;

	ignored_event = fsnotify_create_event(NULL, FS_IN_IGNORED, NULL,
					      FSNOTIFY_EVENT_NONE, NULL, 0,
					      GFP_NOFS);
	if (!ignored_event)
		return;

	ientry = container_of(entry, struct inotify_inode_mark_entry, fsn_entry);

	event_priv = kmem_cache_alloc(event_priv_cachep, GFP_NOFS);
	if (unlikely(!event_priv))
		goto skip_send_ignore;

	fsn_event_priv = &event_priv->fsnotify_event_priv_data;

	fsn_event_priv->group = group;
	event_priv->wd = ientry->wd;

	ret = fsnotify_add_notify_event(group, ignored_event, fsn_event_priv, NULL);
	if (ret)
		inotify_free_event_priv(fsn_event_priv);

skip_send_ignore:

	/* matches the reference taken when the event was created */
	fsnotify_put_event(ignored_event);

	/* remove this entry from the idr */
	inotify_remove_from_idr(group, ientry);

	atomic_dec(&group->inotify_data.user->inotify_watches);
}
/* ding dong the mark is dead */
static void inotify_free_mark(struct fsnotify_mark_entry *entry)
{
	struct inotify_inode_mark_entry *ientry;

	ientry = container_of(entry, struct inotify_inode_mark_entry, fsn_entry);

	kmem_cache_free(inotify_inode_mark_cachep, ientry);
}
static int inotify_update_existing_watch(struct fsnotify_group *group,
					 struct inode *inode,
					 u32 arg)
{
	struct fsnotify_mark_entry *entry;
	struct inotify_inode_mark_entry *ientry;
	__u32 old_mask, new_mask;
	__u32 mask;
	int add = (arg & IN_MASK_ADD);
	int ret;

	/* don't allow invalid bits: we don't want flags set */
	mask = inotify_arg_to_mask(arg);
	if (unlikely(!mask))
		return -EINVAL;

	spin_lock(&inode->i_lock);
	entry = fsnotify_find_mark_entry(group, inode);
	spin_unlock(&inode->i_lock);
	if (!entry)
		return -ENOENT;

	ientry = container_of(entry, struct inotify_inode_mark_entry, fsn_entry);

	spin_lock(&entry->lock);

	old_mask = entry->mask;
	if (add) {
		entry->mask |= mask;
		new_mask = entry->mask;
	} else {
		entry->mask = mask;
		new_mask = entry->mask;
	}

	spin_unlock(&entry->lock);

	if (old_mask != new_mask) {
		/* more bits in old than in new? */
		int dropped = (old_mask & ~new_mask);
		/* more bits in this entry than the inode's mask? */
		int do_inode = (new_mask & ~inode->i_fsnotify_mask);
		/* more bits in this entry than the group? */
		int do_group = (new_mask & ~group->mask);

		/* update the inode with this new entry */
		if (dropped || do_inode)
			fsnotify_recalc_inode_mask(inode);

		/* update the group mask with the new mask */
		if (dropped || do_group)
			fsnotify_recalc_group_mask(group);
	}

	/* return the wd */
	ret = ientry->wd;

	/* match the get from fsnotify_find_mark_entry() */
	fsnotify_put_mark(entry);

	return ret;
}
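/*
 * Semantics of the "add" flag above, by example: with an existing watch
 * mask of IN_CREATE, inotify_add_watch(fd, path, IN_DELETE) replaces the
 * mask with IN_DELETE, while inotify_add_watch(fd, path,
 * IN_DELETE | IN_MASK_ADD) extends it to IN_CREATE | IN_DELETE.
 */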
static int inotify_new_watch(struct fsnotify_group *group,
			     struct inode *inode,
			     u32 arg)
{
	struct inotify_inode_mark_entry *tmp_ientry;
	__u32 mask;
	int ret;
	struct idr *idr = &group->inotify_data.idr;
	spinlock_t *idr_lock = &group->inotify_data.idr_lock;

	/* don't allow invalid bits: we don't want flags set */
	mask = inotify_arg_to_mask(arg);
	if (unlikely(!mask))
		return -EINVAL;

	tmp_ientry = kmem_cache_alloc(inotify_inode_mark_cachep, GFP_KERNEL);
	if (unlikely(!tmp_ientry))
		return -ENOMEM;

	fsnotify_init_mark(&tmp_ientry->fsn_entry, inotify_free_mark);
	tmp_ientry->fsn_entry.mask = mask;
	tmp_ientry->wd = -1;

	ret = -ENOSPC;
	if (atomic_read(&group->inotify_data.user->inotify_watches) >= inotify_max_user_watches)
		goto out_err;

	ret = inotify_add_to_idr(idr, idr_lock, group->inotify_data.last_wd,
				 tmp_ientry);
	if (ret)
		goto out_err;

	/* we are on the idr, now get on the inode */
	ret = fsnotify_add_mark(&tmp_ientry->fsn_entry, group, inode, 0);
	if (ret) {
		/* we failed to get on the inode, get off the idr */
		inotify_remove_from_idr(group, tmp_ientry);
		goto out_err;
	}

	/* update the idr hint, who cares about races, it's just a hint */
	group->inotify_data.last_wd = tmp_ientry->wd;

	/* increment the number of watches the user has */
	atomic_inc(&group->inotify_data.user->inotify_watches);

	/* return the watch descriptor for this new entry */
	ret = tmp_ientry->wd;

	/* if this mark added a new event update the group mask */
	if (mask & ~group->mask)
		fsnotify_recalc_group_mask(group);

out_err:
	/* match the ref from fsnotify_init_mark() */
	fsnotify_put_mark(&tmp_ientry->fsn_entry);

	return ret;
}
static int inotify_update_watch(struct fsnotify_group *group, struct inode *inode, u32 arg)
{
	int ret = 0;

retry:
	/* try to update an existing watch with the new arg */
	ret = inotify_update_existing_watch(group, inode, arg);
	/* no mark present, try to add a new one */
	if (ret == -ENOENT)
		ret = inotify_new_watch(group, inode, arg);
	/*
	 * inotify_new_watch could race with another thread which did an
	 * inotify_new_watch between the update_existing and the add watch
	 * here; go back and try to update an existing mark again.
	 */
	if (ret == -EEXIST)
		goto retry;

	return ret;
}
static struct fsnotify_group *inotify_new_group(struct user_struct *user, unsigned int max_events)
{
	struct fsnotify_group *group;

	group = fsnotify_obtain_group(0, &inotify_fsnotify_ops);
	if (IS_ERR(group))
		return group;

	group->max_events = max_events;

	spin_lock_init(&group->inotify_data.idr_lock);
	idr_init(&group->inotify_data.idr);
	group->inotify_data.last_wd = 0;
	group->inotify_data.user = user;
	group->inotify_data.fa = NULL;

	return group;
}
/* inotify syscalls */
SYSCALL_DEFINE1(inotify_init1, int, flags)
{
	struct fsnotify_group *group;
	struct user_struct *user;
	struct file *filp;
	int fd, ret;

	/* Check the IN_* constants for consistency.  */
	BUILD_BUG_ON(IN_CLOEXEC != O_CLOEXEC);
	BUILD_BUG_ON(IN_NONBLOCK != O_NONBLOCK);

	if (flags & ~(IN_CLOEXEC | IN_NONBLOCK))
		return -EINVAL;

	fd = get_unused_fd_flags(flags & O_CLOEXEC);
	if (fd < 0)
		return fd;

	filp = get_empty_filp();
	if (!filp) {
		ret = -ENFILE;
		goto out_put_fd;
	}

	user = get_current_user();
	if (unlikely(atomic_read(&user->inotify_devs) >=
			inotify_max_user_instances)) {
		ret = -EMFILE;
		goto out_free_uid;
	}

	/* fsnotify_obtain_group took a reference to group, we put this when we kill the file in the end */
	group = inotify_new_group(user, inotify_max_queued_events);
	if (IS_ERR(group)) {
		ret = PTR_ERR(group);
		goto out_free_uid;
	}

	filp->f_op = &inotify_fops;
	filp->f_path.mnt = mntget(inotify_mnt);
	filp->f_path.dentry = dget(inotify_mnt->mnt_root);
	filp->f_mapping = filp->f_path.dentry->d_inode->i_mapping;
	filp->f_mode = FMODE_READ;
	filp->f_flags = O_RDONLY | (flags & O_NONBLOCK);
	filp->private_data = group;

	atomic_inc(&user->inotify_devs);

	fd_install(fd, filp);

	return fd;

out_free_uid:
	free_uid(user);
	put_filp(filp);
out_put_fd:
	put_unused_fd(fd);
	return ret;
}
SYSCALL_DEFINE0(inotify_init)
{
	return sys_inotify_init1(0);
}
SYSCALL_DEFINE3(inotify_add_watch, int, fd, const char __user *, pathname,
		u32, mask)
{
	struct fsnotify_group *group;
	struct inode *inode;
	struct path path;
	struct file *filp;
	int ret, fput_needed;
	unsigned flags = 0;

	filp = fget_light(fd, &fput_needed);
	if (unlikely(!filp))
		return -EBADF;

	/* verify that this is indeed an inotify instance */
	if (unlikely(filp->f_op != &inotify_fops)) {
		ret = -EINVAL;
		goto fput_and_out;
	}

	if (!(mask & IN_DONT_FOLLOW))
		flags |= LOOKUP_FOLLOW;
	if (mask & IN_ONLYDIR)
		flags |= LOOKUP_DIRECTORY;

	ret = inotify_find_inode(pathname, &path, flags);
	if (ret)
		goto fput_and_out;

	/* inode held in place by reference to path; group by fget on fd */
	inode = path.dentry->d_inode;
	group = filp->private_data;

	/* create/update an inode mark */
	ret = inotify_update_watch(group, inode, mask);
	if (unlikely(ret))
		goto path_put_and_out;

path_put_and_out:
	path_put(&path);
fput_and_out:
	fput_light(filp, fput_needed);
	return ret;
}
SYSCALL_DEFINE2(inotify_rm_watch, int, fd, __s32, wd)
{
	struct fsnotify_group *group;
	struct inotify_inode_mark_entry *ientry;
	struct file *filp;
	int ret = 0, fput_needed;

	filp = fget_light(fd, &fput_needed);
	if (unlikely(!filp))
		return -EBADF;

	/* verify that this is indeed an inotify instance */
	ret = -EINVAL;
	if (unlikely(filp->f_op != &inotify_fops))
		goto out;

	group = filp->private_data;

	ret = -EINVAL;
	ientry = inotify_idr_find(group, wd);
	if (unlikely(!ientry))
		goto out;

	ret = 0;

	fsnotify_destroy_mark_by_entry(&ientry->fsn_entry);

	/* match ref taken by inotify_idr_find */
	fsnotify_put_mark(&ientry->fsn_entry);

out:
	fput_light(filp, fput_needed);
	return ret;
}
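/*
 * Illustrative userspace sketch tying the three syscalls together
 * (error handling omitted):
 *
 *	int fd = inotify_init1(IN_CLOEXEC);
 *	int wd = inotify_add_watch(fd, "/tmp", IN_CREATE | IN_DELETE);
 *	// ... read(fd, buf, sizeof(buf)) and walk the events ...
 *	inotify_rm_watch(fd, wd);	// queues IN_IGNORED for this wd
 *	close(fd);			// inotify_release() tears everything down
 */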
static int
inotify_get_sb(struct file_system_type *fs_type, int flags,
	       const char *dev_name, void *data, struct vfsmount *mnt)
{
	return get_sb_pseudo(fs_type, "inotify", NULL,
			     INOTIFYFS_SUPER_MAGIC, mnt);
}

static struct file_system_type inotify_fs_type = {
	.name		= "inotifyfs",
	.get_sb		= inotify_get_sb,
	.kill_sb	= kill_anon_super,
};
/*
 * inotify_user_setup - Our initialization function.  Note that we cannot return
 * error because we have compiled-in VFS hooks.  So an (unlikely) failure here
 * must result in panic().
 */
static int __init inotify_user_setup(void)
{
	int ret;

	ret = register_filesystem(&inotify_fs_type);
	if (unlikely(ret))
		panic("inotify: register_filesystem returned %d!\n", ret);

	inotify_mnt = kern_mount(&inotify_fs_type);
	if (IS_ERR(inotify_mnt))
		panic("inotify: kern_mount ret %ld!\n", PTR_ERR(inotify_mnt));

	inotify_inode_mark_cachep = KMEM_CACHE(inotify_inode_mark_entry, SLAB_PANIC);
	event_priv_cachep = KMEM_CACHE(inotify_event_private_data, SLAB_PANIC);

	inotify_max_queued_events = 16384;
	inotify_max_user_instances = 128;
	inotify_max_user_watches = 8192;

	return 0;
}
module_init(inotify_user_setup);