/*
 * fs/inotify_user.c - inotify support for userspace
 *
 * Authors:
 *	John McCutchan	<ttb@tentacle.dhs.org>
 *	Robert Love	<rml@novell.com>
 *
 * Copyright (C) 2005 John McCutchan
 * Copyright 2006 Hewlett-Packard Development Company, L.P.
 *
 * Copyright (C) 2009 Eric Paris <Red Hat Inc>
 * inotify was largely rewritten to make use of the fsnotify infrastructure
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2, or (at your option) any
 * later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 */

#include <linux/file.h>
#include <linux/fs.h> /* struct inode */
#include <linux/fsnotify_backend.h>
#include <linux/idr.h>
#include <linux/init.h> /* module_init */
#include <linux/inotify.h>
#include <linux/kernel.h> /* roundup() */
#include <linux/magic.h> /* superblock magic number */
#include <linux/mount.h> /* mntget */
#include <linux/namei.h> /* LOOKUP_FOLLOW */
#include <linux/path.h> /* struct path */
#include <linux/sched.h> /* struct user */
#include <linux/slab.h> /* struct kmem_cache */
#include <linux/syscalls.h>
#include <linux/types.h>
#include <linux/uaccess.h>
#include <linux/poll.h>
#include <linux/wait.h>

#include "inotify.h"

#include <asm/ioctls.h>

static struct vfsmount *inotify_mnt __read_mostly;

/* this just sits here and wastes global memory.  used to just pad userspace messages with zeros */
static struct inotify_event nul_inotify_event;

/* these are configurable via /proc/sys/fs/inotify/ */
static int inotify_max_user_instances __read_mostly;
static int inotify_max_queued_events __read_mostly;
int inotify_max_user_watches __read_mostly;

static struct kmem_cache *inotify_inode_mark_cachep __read_mostly;
struct kmem_cache *event_priv_cachep __read_mostly;

/*
 * When inotify registers a new group it increments this and uses that
 * value as an offset to set the fsnotify group "name" and priority.
 */
static atomic_t inotify_grp_num;

#ifdef CONFIG_SYSCTL

#include <linux/sysctl.h>

static int zero;

ctl_table inotify_table[] = {
	{
		.ctl_name	= INOTIFY_MAX_USER_INSTANCES,
		.procname	= "max_user_instances",
		.data		= &inotify_max_user_instances,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= &proc_dointvec_minmax,
		.strategy	= &sysctl_intvec,
		.extra1		= &zero,
	},
	{
		.ctl_name	= INOTIFY_MAX_USER_WATCHES,
		.procname	= "max_user_watches",
		.data		= &inotify_max_user_watches,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= &proc_dointvec_minmax,
		.strategy	= &sysctl_intvec,
		.extra1		= &zero,
	},
	{
		.ctl_name	= INOTIFY_MAX_QUEUED_EVENTS,
		.procname	= "max_queued_events",
		.data		= &inotify_max_queued_events,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= &proc_dointvec_minmax,
		.strategy	= &sysctl_intvec,
		.extra1		= &zero
	},
	{ .ctl_name = 0 }
};
#endif /* CONFIG_SYSCTL */

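/*
 * With CONFIG_SYSCTL enabled these limits appear as ordinary proc files;
 * the values shown below are the defaults assigned in inotify_user_setup()
 * at the bottom of this file:
 *
 *	# cat /proc/sys/fs/inotify/max_user_watches
 *	8192
 *	# echo 65536 > /proc/sys/fs/inotify/max_user_watches
 */
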
static inline __u32 inotify_arg_to_mask(u32 arg)
{
	__u32 mask;

	/* every mark should accept its own IN_IGNORED and care about events on children */
	mask = (FS_IN_IGNORED | FS_EVENT_ON_CHILD);

	/* mask off the flags used to open the fd */
	mask |= (arg & (IN_ALL_EVENTS | IN_ONESHOT));

	return mask;
}

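/* the inverse of inotify_arg_to_mask(): keep only the bits userspace is
 * allowed to see in inotify_event->mask */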
static inline u32 inotify_mask_to_arg(__u32 mask)
{
	return mask & (IN_ALL_EVENTS | IN_ISDIR | IN_UNMOUNT | IN_IGNORED |
		       IN_Q_OVERFLOW);
}

/* inotify userspace file descriptor functions */

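/* report the fd as readable whenever the notification queue is non-empty */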
static unsigned int inotify_poll(struct file *file, poll_table *wait)
{
	struct fsnotify_group *group = file->private_data;
	int ret = 0;

	poll_wait(file, &group->notification_waitq, wait);
	mutex_lock(&group->notification_mutex);
	if (!fsnotify_notify_queue_is_empty(group))
		ret = POLLIN | POLLRDNORM;
	mutex_unlock(&group->notification_mutex);

	return ret;
}

/*
 * Get an fsnotify_event if one exists and is small
 * enough to fit in "count". Return an error pointer if
 * the buffer is not large enough.
 *
 * Called with the group->notification_mutex held.
 */
static struct fsnotify_event *get_one_event(struct fsnotify_group *group,
					    size_t count)
{
	size_t event_size = sizeof(struct inotify_event);
	struct fsnotify_event *event;

	if (fsnotify_notify_queue_is_empty(group))
		return NULL;

	event = fsnotify_peek_notify_event(group);

	event_size += roundup(event->name_len, event_size);

	if (event_size > count)
		return ERR_PTR(-EINVAL);

	/* held the notification_mutex the whole time, so this is the
	 * same event we peeked above */
	fsnotify_remove_notify_event(group);

	return event;
}

/*
 * Copy an event to user space, returning how much we copied.
 *
 * We already checked that the event size is smaller than the
 * buffer we had in "get_one_event()" above.
 */
static ssize_t copy_event_to_user(struct fsnotify_group *group,
				  struct fsnotify_event *event,
				  char __user *buf)
{
	struct inotify_event inotify_event;
	struct fsnotify_event_private_data *fsn_priv;
	struct inotify_event_private_data *priv;
	size_t event_size = sizeof(struct inotify_event);
	size_t name_len;

	/* we get the inotify watch descriptor from the event private data */
	spin_lock(&event->lock);
	fsn_priv = fsnotify_remove_priv_from_event(group, event);
	spin_unlock(&event->lock);

	if (!fsn_priv)
		inotify_event.wd = -1;
	else {
		priv = container_of(fsn_priv, struct inotify_event_private_data,
				    fsnotify_event_priv_data);
		inotify_event.wd = priv->wd;
		inotify_free_event_priv(fsn_priv);
	}

	/* round up event->name_len so it is a multiple of event_size */
	name_len = roundup(event->name_len, event_size);
	inotify_event.len = name_len;

	inotify_event.mask = inotify_mask_to_arg(event->mask);
	inotify_event.cookie = event->sync_cookie;

	/* send the main event */
	if (copy_to_user(buf, &inotify_event, event_size))
		return -EFAULT;

	buf += event_size;

	/*
	 * fsnotify only stores the pathname, so here we have to send the pathname
	 * and then pad that pathname out to a multiple of sizeof(inotify_event)
	 * with zeros.  I get my zeros from the nul_inotify_event.
	 */
	if (name_len) {
		unsigned int len_to_zero = name_len - event->name_len;
		/* copy the path name */
		if (copy_to_user(buf, event->file_name, event->name_len))
			return -EFAULT;
		buf += event->name_len;

		/* fill userspace with 0's from nul_inotify_event */
		if (copy_to_user(buf, &nul_inotify_event, len_to_zero))
			return -EFAULT;
		buf += len_to_zero;
		event_size += name_len;
	}

	return event_size;
}

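/*
 * Drain queued events into the userspace buffer until the next event will
 * not fit, blocking first (unless O_NONBLOCK) while the queue is empty.
 */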
static ssize_t inotify_read(struct file *file, char __user *buf,
			    size_t count, loff_t *pos)
{
	struct fsnotify_group *group;
	struct fsnotify_event *kevent;
	char __user *start;
	int ret;
	DEFINE_WAIT(wait);

	start = buf;
	group = file->private_data;

	while (1) {
		prepare_to_wait(&group->notification_waitq, &wait, TASK_INTERRUPTIBLE);

		mutex_lock(&group->notification_mutex);
		kevent = get_one_event(group, count);
		mutex_unlock(&group->notification_mutex);

		if (kevent) {
			ret = PTR_ERR(kevent);
			if (IS_ERR(kevent))
				break;
			ret = copy_event_to_user(group, kevent, buf);
			fsnotify_put_event(kevent);
			if (ret < 0)
				break;
			buf += ret;
			count -= ret;
			continue;
		}

		ret = -EAGAIN;
		if (file->f_flags & O_NONBLOCK)
			break;
		ret = -EINTR;
		if (signal_pending(current))
			break;

		if (start != buf)
			break;

		schedule();
	}

	finish_wait(&group->notification_waitq, &wait);
	if (start != buf && ret != -EFAULT)
		ret = buf - start;
	return ret;
}

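/* enable or disable SIGIO delivery on the inotify fd */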
static int inotify_fasync(int fd, struct file *file, int on)
{
	struct fsnotify_group *group = file->private_data;

	return fasync_helper(fd, file, on, &group->inotify_data.fa) >= 0 ? 0 : -EIO;
}

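/* final fput on the inotify fd: tear down every mark and release the group */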
static int inotify_release(struct inode *ignored, struct file *file)
{
	struct fsnotify_group *group = file->private_data;
	struct user_struct *user = group->inotify_data.user;

	fsnotify_clear_marks_by_group(group);

	/* free this group, matching get was inotify_init->fsnotify_obtain_group */
	fsnotify_put_group(group);

	atomic_dec(&user->inotify_devs);

	return 0;
}

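/* only FIONREAD is implemented: report the total size in bytes of all
 * queued events, names included and padded */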
static long inotify_ioctl(struct file *file, unsigned int cmd,
			  unsigned long arg)
{
	struct fsnotify_group *group;
	struct fsnotify_event_holder *holder;
	struct fsnotify_event *event;
	void __user *p;
	int ret = -ENOTTY;
	size_t send_len = 0;

	group = file->private_data;
	p = (void __user *) arg;

	switch (cmd) {
	case FIONREAD:
		mutex_lock(&group->notification_mutex);
		list_for_each_entry(holder, &group->notification_list, event_list) {
			event = holder->event;
			send_len += sizeof(struct inotify_event);
			send_len += roundup(event->name_len,
					    sizeof(struct inotify_event));
		}
		mutex_unlock(&group->notification_mutex);
		ret = put_user(send_len, (int __user *) p);
		break;
	}

	return ret;
}

static const struct file_operations inotify_fops = {
	.poll		= inotify_poll,
	.read		= inotify_read,
	.fasync		= inotify_fasync,
	.release	= inotify_release,
	.unlocked_ioctl	= inotify_ioctl,
	.compat_ioctl	= inotify_ioctl,
};

/*
 * inotify_find_inode - resolve a user-given path to a specific inode
 */
static int inotify_find_inode(const char __user *dirname, struct path *path, unsigned flags)
{
	int error;

	error = user_path_at(AT_FDCWD, dirname, flags, path);
	if (error)
		return error;
	/* you can only watch an inode if you have read permissions on it */
	error = inode_permission(path->dentry->d_inode, MAY_READ);
	if (error)
		path_put(path);
	return error;
}

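/* remove the wd -> mark mapping from this group's idr under the idr lock */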
static void inotify_remove_from_idr(struct fsnotify_group *group,
				    struct inotify_inode_mark_entry *ientry)
{
	struct idr *idr;

	spin_lock(&group->inotify_data.idr_lock);
	idr = &group->inotify_data.idr;
	idr_remove(idr, ientry->wd);
	spin_unlock(&group->inotify_data.idr_lock);
	ientry->wd = -1;
}

/*
 * Send IN_IGNORED for this wd, remove this wd from the idr, and drop the
 * internal reference held on the mark because it is in the idr.
 */
void inotify_ignored_and_remove_idr(struct fsnotify_mark_entry *entry,
				    struct fsnotify_group *group)
{
	struct inotify_inode_mark_entry *ientry;
	struct fsnotify_event *ignored_event;
	struct inotify_event_private_data *event_priv;
	struct fsnotify_event_private_data *fsn_event_priv;

	ignored_event = fsnotify_create_event(NULL, FS_IN_IGNORED, NULL,
					      FSNOTIFY_EVENT_NONE, NULL, 0,
					      GFP_NOFS);
	if (!ignored_event)
		return;

	ientry = container_of(entry, struct inotify_inode_mark_entry, fsn_entry);

	event_priv = kmem_cache_alloc(event_priv_cachep, GFP_NOFS);
	if (unlikely(!event_priv))
		goto skip_send_ignore;

	fsn_event_priv = &event_priv->fsnotify_event_priv_data;

	fsn_event_priv->group = group;
	event_priv->wd = ientry->wd;

	fsnotify_add_notify_event(group, ignored_event, fsn_event_priv);

	/* did the private data get added? */
	if (list_empty(&fsn_event_priv->event_list))
		inotify_free_event_priv(fsn_event_priv);

skip_send_ignore:

	/* matches the reference taken when the event was created */
	fsnotify_put_event(ignored_event);

	/* remove this entry from the idr */
	inotify_remove_from_idr(group, ientry);

	/* removed from idr, drop that reference */
	fsnotify_put_mark(entry);

	atomic_dec(&group->inotify_data.user->inotify_watches);
}

/* ding dong the mark is dead */
static void inotify_free_mark(struct fsnotify_mark_entry *entry)
{
	struct inotify_inode_mark_entry *ientry = (struct inotify_inode_mark_entry *)entry;

	kmem_cache_free(inotify_inode_mark_cachep, ientry);
}

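/*
 * Add a new watch on @inode for the events in @arg, or update the mask of
 * the existing one.  Returns the watch descriptor on success, a negative
 * error otherwise.
 */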
static int inotify_update_watch(struct fsnotify_group *group, struct inode *inode, u32 arg)
{
	struct fsnotify_mark_entry *entry = NULL;
	struct inotify_inode_mark_entry *ientry;
	struct inotify_inode_mark_entry *tmp_ientry;
	int ret = 0;
	int add = (arg & IN_MASK_ADD);
	__u32 mask;
	__u32 old_mask, new_mask;

	/* don't allow invalid bits: we don't want flags set */
	mask = inotify_arg_to_mask(arg);
	if (unlikely(!mask))
		return -EINVAL;

	tmp_ientry = kmem_cache_alloc(inotify_inode_mark_cachep, GFP_KERNEL);
	if (unlikely(!tmp_ientry))
		return -ENOMEM;
	/* we set the mask at the end after attaching it */
	fsnotify_init_mark(&tmp_ientry->fsn_entry, inotify_free_mark);
	tmp_ientry->wd = -1;

find_entry:
	spin_lock(&inode->i_lock);
	entry = fsnotify_find_mark_entry(group, inode);
	spin_unlock(&inode->i_lock);
	if (entry) {
		ientry = container_of(entry, struct inotify_inode_mark_entry, fsn_entry);
	} else {
		ret = -ENOSPC;
		if (atomic_read(&group->inotify_data.user->inotify_watches) >= inotify_max_user_watches)
			goto out_err;
retry:
		ret = -ENOMEM;
		if (unlikely(!idr_pre_get(&group->inotify_data.idr, GFP_KERNEL)))
			goto out_err;

		spin_lock(&group->inotify_data.idr_lock);
		ret = idr_get_new_above(&group->inotify_data.idr, &tmp_ientry->fsn_entry,
					group->inotify_data.last_wd,
					&tmp_ientry->wd);
		spin_unlock(&group->inotify_data.idr_lock);
		if (ret) {
			if (ret == -EAGAIN)
				goto retry;
			goto out_err;
		}

		ret = fsnotify_add_mark(&tmp_ientry->fsn_entry, group, inode);
		if (ret) {
			inotify_remove_from_idr(group, tmp_ientry);
			if (ret == -EEXIST)
				goto find_entry;
			goto out_err;
		}

		/* tmp_ientry has been added to the inode, so we are all set up.
		 * now we just need to make sure tmp_ientry doesn't get freed and
		 * we need to set up entry and ientry so the generic code can
		 * do its thing. */
		ientry = tmp_ientry;
		entry = &ientry->fsn_entry;
		tmp_ientry = NULL;

		atomic_inc(&group->inotify_data.user->inotify_watches);

		/* update the idr hint */
		group->inotify_data.last_wd = ientry->wd;

		/* we put the mark on the idr, take a reference */
		fsnotify_get_mark(entry);
	}

	ret = ientry->wd;

	spin_lock(&entry->lock);

	old_mask = entry->mask;
	if (add) {
		entry->mask |= mask;
		new_mask = entry->mask;
	} else {
		entry->mask = mask;
		new_mask = entry->mask;
	}

	spin_unlock(&entry->lock);

	if (old_mask != new_mask) {
		/* more bits in old than in new? */
		int dropped = (old_mask & ~new_mask);
		/* more bits in this entry than the inode's mask? */
		int do_inode = (new_mask & ~inode->i_fsnotify_mask);
		/* more bits in this entry than the group? */
		int do_group = (new_mask & ~group->mask);

		/* update the inode with this new entry */
		if (dropped || do_inode)
			fsnotify_recalc_inode_mask(inode);

		/* update the group mask with the new mask */
		if (dropped || do_group)
			fsnotify_recalc_group_mask(group);
	}

	/* this either matches fsnotify_find_mark_entry, or init_mark_entry
	 * depending on which path we took... */
	fsnotify_put_mark(entry);

out_err:
	/* could be an error, could be that we found an existing mark */
	if (tmp_ientry) {
		/* on the idr but didn't make it on the inode */
		if (tmp_ientry->wd != -1)
			inotify_remove_from_idr(group, tmp_ientry);
		kmem_cache_free(inotify_inode_mark_cachep, tmp_ientry);
	}

	return ret;
}

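/* allocate and set up the fsnotify group backing a single inotify instance */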
static struct fsnotify_group *inotify_new_group(struct user_struct *user, unsigned int max_events)
{
	struct fsnotify_group *group;
	unsigned int grp_num;

	/* fsnotify_obtain_group took a reference to group, we put this when we kill the file in the end */
	grp_num = (INOTIFY_GROUP_NUM - atomic_inc_return(&inotify_grp_num));
	group = fsnotify_obtain_group(grp_num, 0, &inotify_fsnotify_ops);
	if (IS_ERR(group))
		return group;

	group->max_events = max_events;

	spin_lock_init(&group->inotify_data.idr_lock);
	idr_init(&group->inotify_data.idr);
	group->inotify_data.last_wd = 0;
	group->inotify_data.user = user;
	group->inotify_data.fa = NULL;

	return group;
}

/* inotify syscalls */
SYSCALL_DEFINE1(inotify_init1, int, flags)
{
	struct fsnotify_group *group;
	struct user_struct *user;
	struct file *filp;
	int fd, ret;

	/* Check the IN_* constants for consistency.  */
	BUILD_BUG_ON(IN_CLOEXEC != O_CLOEXEC);
	BUILD_BUG_ON(IN_NONBLOCK != O_NONBLOCK);

	if (flags & ~(IN_CLOEXEC | IN_NONBLOCK))
		return -EINVAL;

	fd = get_unused_fd_flags(flags & O_CLOEXEC);
	if (fd < 0)
		return fd;

	filp = get_empty_filp();
	if (!filp) {
		ret = -ENFILE;
		goto out_put_fd;
	}

	user = get_current_user();
	if (unlikely(atomic_read(&user->inotify_devs) >=
			inotify_max_user_instances)) {
		ret = -EMFILE;
		goto out_free_uid;
	}

	/* fsnotify_obtain_group took a reference to group, we put this when we kill the file in the end */
	group = inotify_new_group(user, inotify_max_queued_events);
	if (IS_ERR(group)) {
		ret = PTR_ERR(group);
		goto out_free_uid;
	}

	filp->f_op = &inotify_fops;
	filp->f_path.mnt = mntget(inotify_mnt);
	filp->f_path.dentry = dget(inotify_mnt->mnt_root);
	filp->f_mapping = filp->f_path.dentry->d_inode->i_mapping;
	filp->f_mode = FMODE_READ;
	filp->f_flags = O_RDONLY | (flags & O_NONBLOCK);
	filp->private_data = group;

	atomic_inc(&user->inotify_devs);

	fd_install(fd, filp);

	return fd;

out_free_uid:
	free_uid(user);
	put_filp(filp);
out_put_fd:
	put_unused_fd(fd);
	return ret;
}

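/*
 * Illustrative userspace sketch (not part of this file; the watched path
 * and buffer handling are hypothetical) tying the syscalls in this file
 * together:
 *
 *	int fd = inotify_init1(IN_CLOEXEC);
 *	int wd = inotify_add_watch(fd, "/tmp", IN_CREATE | IN_DELETE);
 *	char buf[4096];
 *	ssize_t len = read(fd, buf, sizeof(buf)); // stream of inotify_events
 *	inotify_rm_watch(fd, wd);
 *	close(fd);
 */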
SYSCALL_DEFINE0(inotify_init)
{
	return sys_inotify_init1(0);
}

SYSCALL_DEFINE3(inotify_add_watch, int, fd, const char __user *, pathname,
		u32, mask)
{
	struct fsnotify_group *group;
	struct inode *inode;
	struct path path;
	struct file *filp;
	int ret, fput_needed;
	unsigned flags = 0;

	filp = fget_light(fd, &fput_needed);
	if (unlikely(!filp))
		return -EBADF;

	/* verify that this is indeed an inotify instance */
	if (unlikely(filp->f_op != &inotify_fops)) {
		ret = -EINVAL;
		goto fput_and_out;
	}

	if (!(mask & IN_DONT_FOLLOW))
		flags |= LOOKUP_FOLLOW;
	if (mask & IN_ONLYDIR)
		flags |= LOOKUP_DIRECTORY;

	ret = inotify_find_inode(pathname, &path, flags);
	if (ret)
		goto fput_and_out;

	/* inode held in place by reference to path; group by fget on fd */
	inode = path.dentry->d_inode;
	group = filp->private_data;

	/* create/update an inode mark */
	ret = inotify_update_watch(group, inode, mask);
	if (unlikely(ret))
		goto path_put_and_out;

path_put_and_out:
	path_put(&path);
fput_and_out:
	fput_light(filp, fput_needed);
	return ret;
}

SYSCALL_DEFINE2(inotify_rm_watch, int, fd, __s32, wd)
{
	struct fsnotify_group *group;
	struct fsnotify_mark_entry *entry;
	struct file *filp;
	int ret = 0, fput_needed;

	filp = fget_light(fd, &fput_needed);
	if (unlikely(!filp))
		return -EBADF;

	/* verify that this is indeed an inotify instance */
	if (unlikely(filp->f_op != &inotify_fops)) {
		ret = -EINVAL;
		goto out;
	}

	group = filp->private_data;

	spin_lock(&group->inotify_data.idr_lock);
	entry = idr_find(&group->inotify_data.idr, wd);
	if (unlikely(!entry)) {
		spin_unlock(&group->inotify_data.idr_lock);
		ret = -EINVAL;
		goto out;
	}
	fsnotify_get_mark(entry);
	spin_unlock(&group->inotify_data.idr_lock);

	fsnotify_destroy_mark_by_entry(entry);
	fsnotify_put_mark(entry);

out:
	fput_light(filp, fput_needed);
	return ret;
}

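/* back inotify fds with dentries and inodes from an internal pseudo filesystem */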
static int
inotify_get_sb(struct file_system_type *fs_type, int flags,
	       const char *dev_name, void *data, struct vfsmount *mnt)
{
	return get_sb_pseudo(fs_type, "inotify", NULL,
			     INOTIFYFS_SUPER_MAGIC, mnt);
}

static struct file_system_type inotify_fs_type = {
	.name		= "inotify",
	.get_sb		= inotify_get_sb,
	.kill_sb	= kill_anon_super,
};

/*
 * inotify_user_setup - Our initialization function.  Note that we cannot return
 * error because we have compiled-in VFS hooks.  So an (unlikely) failure here
 * must result in panic().
 */
static int __init inotify_user_setup(void)
{
	int ret;

	ret = register_filesystem(&inotify_fs_type);
	if (unlikely(ret))
		panic("inotify: register_filesystem returned %d!\n", ret);

	inotify_mnt = kern_mount(&inotify_fs_type);
	if (IS_ERR(inotify_mnt))
		panic("inotify: kern_mount ret %ld!\n", PTR_ERR(inotify_mnt));

	inotify_inode_mark_cachep = KMEM_CACHE(inotify_inode_mark_entry, SLAB_PANIC);
	event_priv_cachep = KMEM_CACHE(inotify_event_private_data, SLAB_PANIC);

	inotify_max_queued_events = 16384;
	inotify_max_user_instances = 128;
	inotify_max_user_watches = 8192;

	return 0;
}
module_init(inotify_user_setup);
);