/*
 * fs/inotify_user.c - inotify support for userspace
 *
 * Authors:
 *	John McCutchan	<ttb@tentacle.dhs.org>
 *	Robert Love	<rml@novell.com>
 *
 * Copyright (C) 2005 John McCutchan
 * Copyright 2006 Hewlett-Packard Development Company, L.P.
 *
 * Copyright (C) 2009 Eric Paris <Red Hat Inc>
 * inotify was largely rewritten to make use of the fsnotify infrastructure
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2, or (at your option) any
 * later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 */
#include <linux/file.h>
#include <linux/fs.h> /* struct inode */
#include <linux/fsnotify_backend.h>
#include <linux/idr.h>
#include <linux/init.h> /* module_init */
#include <linux/inotify.h>
#include <linux/kernel.h> /* roundup() */
#include <linux/namei.h> /* LOOKUP_FOLLOW */
#include <linux/sched.h> /* struct user */
#include <linux/slab.h> /* struct kmem_cache */
#include <linux/syscalls.h>
#include <linux/types.h>
#include <linux/anon_inodes.h>
#include <linux/uaccess.h>
#include <linux/poll.h>
#include <linux/wait.h>

#include "inotify.h"
#include "../fdinfo.h"

#include <asm/ioctls.h>
/* these are configurable via /proc/sys/fs/inotify/ */
static int inotify_max_user_instances __read_mostly;
static int inotify_max_queued_events __read_mostly;
static int inotify_max_user_watches __read_mostly;

static struct kmem_cache *inotify_inode_mark_cachep __read_mostly;
#ifdef CONFIG_SYSCTL

#include <linux/sysctl.h>

ctl_table inotify_table[] = {
	{
		.procname	= "max_user_instances",
		.data		= &inotify_max_user_instances,
		.maxlen		= sizeof(int),
		.proc_handler	= proc_dointvec_minmax,
	},
	{
		.procname	= "max_user_watches",
		.data		= &inotify_max_user_watches,
		.maxlen		= sizeof(int),
		.proc_handler	= proc_dointvec_minmax,
	},
	{
		.procname	= "max_queued_events",
		.data		= &inotify_max_queued_events,
		.maxlen		= sizeof(int),
		.proc_handler	= proc_dointvec_minmax,
	},
	{ }
};
#endif /* CONFIG_SYSCTL */
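/*
 * Illustrative note (not part of the original file): the three limits above
 * are exported to userspace as /proc/sys/fs/inotify/max_user_instances,
 * /proc/sys/fs/inotify/max_user_watches and
 * /proc/sys/fs/inotify/max_queued_events.  A minimal userspace sketch for
 * reading one of them, assuming nothing beyond standard C and procfs:
 *
 *	#include <stdio.h>
 *
 *	int main(void)
 *	{
 *		long watches = 0;
 *		FILE *f = fopen("/proc/sys/fs/inotify/max_user_watches", "r");
 *
 *		if (f && fscanf(f, "%ld", &watches) == 1)
 *			printf("per-user watch limit: %ld\n", watches);
 *		if (f)
 *			fclose(f);
 *		return 0;
 *	}
 */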
static inline __u32 inotify_arg_to_mask(u32 arg)
{
	__u32 mask;

	/*
	 * Everything should accept its own IN_IGNORED, care about children,
	 * and receive events when the inode is unmounted.
	 */
	mask = (FS_IN_IGNORED | FS_EVENT_ON_CHILD | FS_UNMOUNT);

	/* mask off the flags used to open the fd */
	mask |= (arg & (IN_ALL_EVENTS | IN_ONESHOT | IN_EXCL_UNLINK));

	return mask;
}
static inline u32 inotify_mask_to_arg(__u32 mask)
{
	return mask & (IN_ALL_EVENTS | IN_ISDIR | IN_UNMOUNT | IN_IGNORED |
		       IN_Q_OVERFLOW);
}
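/*
 * Illustrative example (not part of the original file): for
 * arg = IN_CLOSE_WRITE | IN_ONESHOT, inotify_arg_to_mask() returns
 * FS_CLOSE_WRITE | FS_IN_ONESHOT | FS_IN_IGNORED | FS_EVENT_ON_CHILD |
 * FS_UNMOUNT, while inotify_mask_to_arg() later strips the kernel-internal
 * bits (FS_EVENT_ON_CHILD and FS_IN_ONESHOT here) before the mask is
 * reported back to userspace.
 */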
/* inotify userspace file descriptor functions */
static unsigned int inotify_poll(struct file *file, poll_table *wait)
{
	struct fsnotify_group *group = file->private_data;
	int ret = 0;

	poll_wait(file, &group->notification_waitq, wait);
	mutex_lock(&group->notification_mutex);
	if (!fsnotify_notify_queue_is_empty(group))
		ret = POLLIN | POLLRDNORM;
	mutex_unlock(&group->notification_mutex);

	return ret;
}
static int round_event_name_len(struct fsnotify_event *fsn_event)
{
	struct inotify_event_info *event;

	event = INOTIFY_E(fsn_event);
	if (!event->name_len)
		return 0;
	return roundup(event->name_len + 1, sizeof(struct inotify_event));
}
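/*
 * Illustrative example (not part of the original file): sizeof(struct
 * inotify_event) is 16 bytes, so a 5-character name ("hello") is padded to
 * roundup(5 + 1, 16) = 16 bytes and a 20-character name to
 * roundup(20 + 1, 16) = 32 bytes, while events without a name contribute 0.
 */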
/*
 * Get an inotify_kernel_event if one exists and is small
 * enough to fit in "count". Return an error pointer if the
 * event will not fit in "count".
 *
 * Called with the group->notification_mutex held.
 */
static struct fsnotify_event *get_one_event(struct fsnotify_group *group,
					    size_t count)
{
	size_t event_size = sizeof(struct inotify_event);
	struct fsnotify_event *event;

	if (fsnotify_notify_queue_is_empty(group))
		return NULL;

	event = fsnotify_peek_notify_event(group);

	pr_debug("%s: group=%p event=%p\n", __func__, group, event);

	event_size += round_event_name_len(event);
	if (event_size > count)
		return ERR_PTR(-EINVAL);

	/* held the notification_mutex the whole time, so this is the
	 * same event we peeked above */
	fsnotify_remove_notify_event(group);

	return event;
}
/*
 * Copy an event to user space, returning how much we copied.
 *
 * We already checked that the event size is smaller than the
 * buffer we had in "get_one_event()" above.
 */
static ssize_t copy_event_to_user(struct fsnotify_group *group,
				  struct fsnotify_event *fsn_event,
				  char __user *buf)
{
	struct inotify_event inotify_event;
	struct inotify_event_info *event;
	size_t event_size = sizeof(struct inotify_event);
	size_t name_len;
	size_t pad_name_len;

	pr_debug("%s: group=%p event=%p\n", __func__, group, fsn_event);

	event = INOTIFY_E(fsn_event);
	name_len = event->name_len;
	/*
	 * round up name length (plus an extra byte for the terminating '\0')
	 * so it is a multiple of event_size
	 */
	pad_name_len = round_event_name_len(fsn_event);
	inotify_event.len = pad_name_len;
	inotify_event.mask = inotify_mask_to_arg(fsn_event->mask);
	inotify_event.wd = event->wd;
	inotify_event.cookie = event->sync_cookie;

	/* send the main event */
	if (copy_to_user(buf, &inotify_event, event_size))
		return -EFAULT;

	buf += event_size;

	/*
	 * fsnotify only stores the pathname, so here we have to send the pathname
	 * and then pad that pathname out to a multiple of sizeof(inotify_event)
	 * with zeroes.
	 */
	if (pad_name_len) {
		/* copy the path name */
		if (copy_to_user(buf, event->name, name_len))
			return -EFAULT;
		buf += name_len;

		/* fill userspace with 0's */
		if (clear_user(buf, pad_name_len - name_len))
			return -EFAULT;
		event_size += pad_name_len;
	}

	return event_size;
}
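/*
 * Illustrative userspace sketch (not part of the original file) showing how
 * a reader walks the buffer laid out by copy_event_to_user(); "buf" and
 * "len" are assumed to come from a successful read() on an inotify fd:
 *
 *	#include <sys/inotify.h>
 *	#include <stdio.h>
 *
 *	static void walk_events(const char *buf, ssize_t len)
 *	{
 *		ssize_t i = 0;
 *
 *		while (i + (ssize_t)sizeof(struct inotify_event) <= len) {
 *			const struct inotify_event *ev =
 *				(const struct inotify_event *)(buf + i);
 *
 *			printf("wd=%d mask=%#x name=%s\n", ev->wd, ev->mask,
 *			       ev->len ? ev->name : "");
 *			i += sizeof(struct inotify_event) + ev->len;
 *		}
 *	}
 */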
static ssize_t inotify_read(struct file *file, char __user *buf,
			    size_t count, loff_t *pos)
{
	struct fsnotify_group *group;
	struct fsnotify_event *kevent;
	char __user *start;
	int ret;
	DEFINE_WAIT(wait);

	start = buf;
	group = file->private_data;

	while (1) {
		prepare_to_wait(&group->notification_waitq, &wait, TASK_INTERRUPTIBLE);

		mutex_lock(&group->notification_mutex);
		kevent = get_one_event(group, count);
		mutex_unlock(&group->notification_mutex);

		pr_debug("%s: group=%p kevent=%p\n", __func__, group, kevent);

		if (kevent) {
			ret = PTR_ERR(kevent);
			if (IS_ERR(kevent))
				break;
			ret = copy_event_to_user(group, kevent, buf);
			fsnotify_destroy_event(group, kevent);
			if (ret < 0)
				break;
			buf += ret;
			count -= ret;
			continue;
		}

		ret = -EAGAIN;
		if (file->f_flags & O_NONBLOCK)
			break;
		ret = -ERESTARTSYS;
		if (signal_pending(current))
			break;

		if (start != buf)
			break;

		schedule();
	}

	finish_wait(&group->notification_waitq, &wait);
	if (start != buf && ret != -EFAULT)
		ret = buf - start;
	return ret;
}
static int inotify_release(struct inode *ignored, struct file *file)
{
	struct fsnotify_group *group = file->private_data;

	pr_debug("%s: group=%p\n", __func__, group);

	/* free this group, matching get was inotify_init->fsnotify_obtain_group */
	fsnotify_destroy_group(group);

	return 0;
}
static long inotify_ioctl(struct file *file, unsigned int cmd,
			  unsigned long arg)
{
	struct fsnotify_group *group;
	struct fsnotify_event *fsn_event;
	void __user *p;
	int ret = -ENOTTY;
	size_t send_len = 0;

	group = file->private_data;
	p = (void __user *) arg;

	pr_debug("%s: group=%p cmd=%u\n", __func__, group, cmd);

	switch (cmd) {
	case FIONREAD:
		mutex_lock(&group->notification_mutex);
		list_for_each_entry(fsn_event, &group->notification_list,
				    list) {
			send_len += sizeof(struct inotify_event);
			send_len += round_event_name_len(fsn_event);
		}
		mutex_unlock(&group->notification_mutex);
		ret = put_user(send_len, (int __user *) p);
		break;
	}

	return ret;
}
static const struct file_operations inotify_fops = {
	.show_fdinfo	= inotify_show_fdinfo,
	.poll		= inotify_poll,
	.read		= inotify_read,
	.fasync		= fsnotify_fasync,
	.release	= inotify_release,
	.unlocked_ioctl	= inotify_ioctl,
	.compat_ioctl	= inotify_ioctl,
	.llseek		= noop_llseek,
};
/*
 * inotify_find_inode - resolve a user-given path to a specific inode
 */
static int inotify_find_inode(const char __user *dirname, struct path *path, unsigned flags)
{
	int error;

	error = user_path_at(AT_FDCWD, dirname, flags, path);
	if (error)
		return error;
	/* you can only watch an inode if you have read permissions on it */
	error = inode_permission(path->dentry->d_inode, MAY_READ);
	if (error)
		path_put(path);
	return error;
}
static int inotify_add_to_idr(struct idr *idr, spinlock_t *idr_lock,
			      struct inotify_inode_mark *i_mark)
{
	int ret;

	idr_preload(GFP_KERNEL);
	spin_lock(idr_lock);

	ret = idr_alloc_cyclic(idr, i_mark, 1, 0, GFP_NOWAIT);
	if (ret >= 0) {
		/* we added the mark to the idr, take a reference */
		i_mark->wd = ret;
		fsnotify_get_mark(&i_mark->fsn_mark);
	}

	spin_unlock(idr_lock);
	idr_preload_end();
	return ret < 0 ? ret : 0;
}
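/*
 * Illustrative note (not part of the original file): idr_alloc_cyclic()
 * above hands out watch descriptors starting at 1 and continuing from the
 * last allocated value, so a fresh inotify instance typically sees
 * wd = 1, 2, 3, ... and a descriptor is not immediately reused after its
 * watch is removed.
 */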
static struct inotify_inode_mark *inotify_idr_find_locked(struct fsnotify_group *group,
							   int wd)
{
	struct idr *idr = &group->inotify_data.idr;
	spinlock_t *idr_lock = &group->inotify_data.idr_lock;
	struct inotify_inode_mark *i_mark;

	assert_spin_locked(idr_lock);

	i_mark = idr_find(idr, wd);
	if (i_mark) {
		struct fsnotify_mark *fsn_mark = &i_mark->fsn_mark;

		fsnotify_get_mark(fsn_mark);
		/* One ref for being in the idr, one ref we just took */
		BUG_ON(atomic_read(&fsn_mark->refcnt) < 2);
	}

	return i_mark;
}
static struct inotify_inode_mark *inotify_idr_find(struct fsnotify_group *group,
						   int wd)
{
	struct inotify_inode_mark *i_mark;
	spinlock_t *idr_lock = &group->inotify_data.idr_lock;

	spin_lock(idr_lock);
	i_mark = inotify_idr_find_locked(group, wd);
	spin_unlock(idr_lock);

	return i_mark;
}
static void do_inotify_remove_from_idr(struct fsnotify_group *group,
				       struct inotify_inode_mark *i_mark)
{
	struct idr *idr = &group->inotify_data.idr;
	spinlock_t *idr_lock = &group->inotify_data.idr_lock;
	int wd = i_mark->wd;

	assert_spin_locked(idr_lock);

	idr_remove(idr, wd);

	/* removed from the idr, drop that ref */
	fsnotify_put_mark(&i_mark->fsn_mark);
}
/*
 * Remove the mark from the idr (if present) and drop the reference
 * on the mark because it was in the idr.
 */
static void inotify_remove_from_idr(struct fsnotify_group *group,
				    struct inotify_inode_mark *i_mark)
{
	spinlock_t *idr_lock = &group->inotify_data.idr_lock;
	struct inotify_inode_mark *found_i_mark = NULL;
	int wd;

	spin_lock(idr_lock);
	wd = i_mark->wd;

	/*
	 * does this i_mark think it is in the idr? we shouldn't get called
	 * unless it thinks it is in.
	 */
	if (wd == -1) {
		WARN_ONCE(1, "%s: i_mark=%p i_mark->wd=%d i_mark->group=%p"
			" i_mark->inode=%p\n", __func__, i_mark, i_mark->wd,
			i_mark->fsn_mark.group, i_mark->fsn_mark.i.inode);
		goto out;
	}

	/* Let's look in the idr to see if we find it */
	found_i_mark = inotify_idr_find_locked(group, wd);
	if (unlikely(!found_i_mark)) {
		WARN_ONCE(1, "%s: i_mark=%p i_mark->wd=%d i_mark->group=%p"
			" i_mark->inode=%p\n", __func__, i_mark, i_mark->wd,
			i_mark->fsn_mark.group, i_mark->fsn_mark.i.inode);
		goto out;
	}

	/*
	 * We found a mark in the idr at the right wd, but it's
	 * not the mark we were told to remove. eparis seriously
	 * fucked up somewhere.
	 */
	if (unlikely(found_i_mark != i_mark)) {
		WARN_ONCE(1, "%s: i_mark=%p i_mark->wd=%d i_mark->group=%p "
			"mark->inode=%p found_i_mark=%p found_i_mark->wd=%d "
			"found_i_mark->group=%p found_i_mark->inode=%p\n",
			__func__, i_mark, i_mark->wd, i_mark->fsn_mark.group,
			i_mark->fsn_mark.i.inode, found_i_mark, found_i_mark->wd,
			found_i_mark->fsn_mark.group,
			found_i_mark->fsn_mark.i.inode);
		goto out;
	}

	/*
	 * One ref for being in the idr
	 * one ref held by the caller trying to kill us
	 * one ref grabbed by inotify_idr_find
	 */
	if (unlikely(atomic_read(&i_mark->fsn_mark.refcnt) < 3)) {
		printk(KERN_ERR "%s: i_mark=%p i_mark->wd=%d i_mark->group=%p"
			" i_mark->inode=%p\n", __func__, i_mark, i_mark->wd,
			i_mark->fsn_mark.group, i_mark->fsn_mark.i.inode);
		/* we can't really recover with bad ref counting... */
		BUG();
	}

	do_inotify_remove_from_idr(group, i_mark);
out:
	/* match the ref taken by inotify_idr_find_locked() */
	if (found_i_mark)
		fsnotify_put_mark(&found_i_mark->fsn_mark);
	i_mark->wd = -1;
	spin_unlock(idr_lock);
}
/*
 * Send IN_IGNORED for this wd, remove this wd from the idr.
 */
void inotify_ignored_and_remove_idr(struct fsnotify_mark *fsn_mark,
				    struct fsnotify_group *group)
{
	struct inotify_inode_mark *i_mark;

	/* Queue ignore event for the watch */
	inotify_handle_event(group, NULL, fsn_mark, NULL, FS_IN_IGNORED,
			     NULL, FSNOTIFY_EVENT_NONE, NULL, 0);

	i_mark = container_of(fsn_mark, struct inotify_inode_mark, fsn_mark);
	/* remove this mark from the idr */
	inotify_remove_from_idr(group, i_mark);

	atomic_dec(&group->inotify_data.user->inotify_watches);
}
/* ding dong the mark is dead */
static void inotify_free_mark(struct fsnotify_mark *fsn_mark)
{
	struct inotify_inode_mark *i_mark;

	i_mark = container_of(fsn_mark, struct inotify_inode_mark, fsn_mark);

	kmem_cache_free(inotify_inode_mark_cachep, i_mark);
}
static int inotify_update_existing_watch(struct fsnotify_group *group,
					 struct inode *inode,
					 u32 arg)
{
	struct fsnotify_mark *fsn_mark;
	struct inotify_inode_mark *i_mark;
	__u32 old_mask, new_mask;
	__u32 mask;
	int add = (arg & IN_MASK_ADD);
	int ret;

	mask = inotify_arg_to_mask(arg);

	fsn_mark = fsnotify_find_inode_mark(group, inode);
	if (!fsn_mark)
		return -ENOENT;

	i_mark = container_of(fsn_mark, struct inotify_inode_mark, fsn_mark);

	spin_lock(&fsn_mark->lock);

	old_mask = fsn_mark->mask;
	if (add)
		fsnotify_set_mark_mask_locked(fsn_mark, (fsn_mark->mask | mask));
	else
		fsnotify_set_mark_mask_locked(fsn_mark, mask);
	new_mask = fsn_mark->mask;

	spin_unlock(&fsn_mark->lock);

	if (old_mask != new_mask) {
		/* more bits in old than in new? */
		int dropped = (old_mask & ~new_mask);
		/* more bits in this fsn_mark than the inode's mask? */
		int do_inode = (new_mask & ~inode->i_fsnotify_mask);

		/* update the inode with this new fsn_mark */
		if (dropped || do_inode)
			fsnotify_recalc_inode_mask(inode);
	}

	/* return the wd */
	ret = i_mark->wd;

	/* match the get from fsnotify_find_mark() */
	fsnotify_put_mark(fsn_mark);

	return ret;
}
static int inotify_new_watch(struct fsnotify_group *group,
			     struct inode *inode,
			     u32 arg)
{
	struct inotify_inode_mark *tmp_i_mark;
	__u32 mask;
	int ret;
	struct idr *idr = &group->inotify_data.idr;
	spinlock_t *idr_lock = &group->inotify_data.idr_lock;

	mask = inotify_arg_to_mask(arg);

	tmp_i_mark = kmem_cache_alloc(inotify_inode_mark_cachep, GFP_KERNEL);
	if (unlikely(!tmp_i_mark))
		return -ENOMEM;

	fsnotify_init_mark(&tmp_i_mark->fsn_mark, inotify_free_mark);
	tmp_i_mark->fsn_mark.mask = mask;
	tmp_i_mark->wd = -1;

	ret = -ENOSPC;
	if (atomic_read(&group->inotify_data.user->inotify_watches) >= inotify_max_user_watches)
		goto out_err;

	ret = inotify_add_to_idr(idr, idr_lock, tmp_i_mark);
	if (ret)
		goto out_err;

	/* we are on the idr, now get on the inode */
	ret = fsnotify_add_mark_locked(&tmp_i_mark->fsn_mark, group, inode,
				       NULL, 0);
	if (ret) {
		/* we failed to get on the inode, get off the idr */
		inotify_remove_from_idr(group, tmp_i_mark);
		goto out_err;
	}

	/* increment the number of watches the user has */
	atomic_inc(&group->inotify_data.user->inotify_watches);

	/* return the watch descriptor for this new mark */
	ret = tmp_i_mark->wd;

out_err:
	/* match the ref from fsnotify_init_mark() */
	fsnotify_put_mark(&tmp_i_mark->fsn_mark);

	return ret;
}
static int inotify_update_watch(struct fsnotify_group *group, struct inode *inode, u32 arg)
{
	int ret = 0;

	mutex_lock(&group->mark_mutex);
	/* try to update an existing watch with the new arg */
	ret = inotify_update_existing_watch(group, inode, arg);
	/* no mark present, try to add a new one */
	if (ret == -ENOENT)
		ret = inotify_new_watch(group, inode, arg);
	mutex_unlock(&group->mark_mutex);

	return ret;
}
static struct fsnotify_group *inotify_new_group(unsigned int max_events)
{
	struct fsnotify_group *group;
	struct inotify_event_info *oevent;

	group = fsnotify_alloc_group(&inotify_fsnotify_ops);
	if (IS_ERR(group))
		return group;

	oevent = kmalloc(sizeof(struct inotify_event_info), GFP_KERNEL);
	if (unlikely(!oevent)) {
		fsnotify_destroy_group(group);
		return ERR_PTR(-ENOMEM);
	}
	group->overflow_event = &oevent->fse;
	fsnotify_init_event(group->overflow_event, NULL, FS_Q_OVERFLOW);
	oevent->wd = -1;
	oevent->sync_cookie = 0;
	oevent->name_len = 0;

	group->max_events = max_events;

	spin_lock_init(&group->inotify_data.idr_lock);
	idr_init(&group->inotify_data.idr);
	group->inotify_data.user = get_current_user();

	if (atomic_inc_return(&group->inotify_data.user->inotify_devs) >
	    inotify_max_user_instances) {
		fsnotify_destroy_group(group);
		return ERR_PTR(-EMFILE);
	}

	return group;
}
/* inotify syscalls */
SYSCALL_DEFINE1(inotify_init1, int, flags)
{
	struct fsnotify_group *group;
	int ret;

	/* Check the IN_* constants for consistency. */
	BUILD_BUG_ON(IN_CLOEXEC != O_CLOEXEC);
	BUILD_BUG_ON(IN_NONBLOCK != O_NONBLOCK);

	if (flags & ~(IN_CLOEXEC | IN_NONBLOCK))
		return -EINVAL;

	/* fsnotify_obtain_group took a reference to group, we put this when we kill the file in the end */
	group = inotify_new_group(inotify_max_queued_events);
	if (IS_ERR(group))
		return PTR_ERR(group);

	ret = anon_inode_getfd("inotify", &inotify_fops, group,
			       O_RDONLY | flags);
	if (ret < 0)
		fsnotify_destroy_group(group);

	return ret;
}
SYSCALL_DEFINE0(inotify_init)
{
	return sys_inotify_init1(0);
}
SYSCALL_DEFINE3(inotify_add_watch, int, fd, const char __user *, pathname,
		u32, mask)
{
	struct fsnotify_group *group;
	struct inode *inode;
	struct path path;
	struct fd f;
	int ret;
	unsigned flags = 0;

	/* don't allow invalid bits: we don't want flags set */
	if (unlikely(!(mask & ALL_INOTIFY_BITS)))
		return -EINVAL;

	f = fdget(fd);
	if (unlikely(!f.file))
		return -EBADF;

	/* verify that this is indeed an inotify instance */
	if (unlikely(f.file->f_op != &inotify_fops)) {
		ret = -EINVAL;
		goto fput_and_out;
	}

	if (!(mask & IN_DONT_FOLLOW))
		flags |= LOOKUP_FOLLOW;
	if (mask & IN_ONLYDIR)
		flags |= LOOKUP_DIRECTORY;

	ret = inotify_find_inode(pathname, &path, flags);
	if (ret)
		goto fput_and_out;

	/* inode held in place by reference to path; group by fget on fd */
	inode = path.dentry->d_inode;
	group = f.file->private_data;

	/* create/update an inode mark */
	ret = inotify_update_watch(group, inode, mask);
	path_put(&path);
fput_and_out:
	fdput(f);
	return ret;
}
SYSCALL_DEFINE2(inotify_rm_watch, int, fd, __s32, wd)
{
	struct fsnotify_group *group;
	struct inotify_inode_mark *i_mark;
	struct fd f;
	int ret = 0;

	f = fdget(fd);
	if (unlikely(!f.file))
		return -EBADF;

	/* verify that this is indeed an inotify instance */
	ret = -EINVAL;
	if (unlikely(f.file->f_op != &inotify_fops))
		goto out;

	group = f.file->private_data;

	ret = -EINVAL;
	i_mark = inotify_idr_find(group, wd);
	if (unlikely(!i_mark))
		goto out;

	ret = 0;

	fsnotify_destroy_mark(&i_mark->fsn_mark, group);

	/* match ref taken by inotify_idr_find */
	fsnotify_put_mark(&i_mark->fsn_mark);

out:
	fdput(f);
	return ret;
}
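/*
 * Illustrative userspace sketch (not part of the original file) exercising
 * the syscalls defined above; "/tmp" is only an example path and the error
 * handling is intentionally minimal:
 *
 *	#include <sys/inotify.h>
 *	#include <unistd.h>
 *	#include <stdio.h>
 *
 *	int main(void)
 *	{
 *		char buf[4096];
 *		int fd = inotify_init1(IN_NONBLOCK | IN_CLOEXEC);
 *		int wd = inotify_add_watch(fd, "/tmp", IN_CREATE | IN_DELETE);
 *		ssize_t n = read(fd, buf, sizeof(buf));  -- -1/EAGAIN if empty
 *
 *		if (n > 0)
 *			printf("got %zd bytes of events\n", n);
 *		inotify_rm_watch(fd, wd);
 *		close(fd);
 *		return 0;
 *	}
 */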
/*
 * inotify_user_setup - Our initialization function.  Note that we cannot return
 * error because we have compiled-in VFS hooks.  So an (unlikely) failure here
 * must result in panic().
 */
static int __init inotify_user_setup(void)
{
	BUILD_BUG_ON(IN_ACCESS != FS_ACCESS);
	BUILD_BUG_ON(IN_MODIFY != FS_MODIFY);
	BUILD_BUG_ON(IN_ATTRIB != FS_ATTRIB);
	BUILD_BUG_ON(IN_CLOSE_WRITE != FS_CLOSE_WRITE);
	BUILD_BUG_ON(IN_CLOSE_NOWRITE != FS_CLOSE_NOWRITE);
	BUILD_BUG_ON(IN_OPEN != FS_OPEN);
	BUILD_BUG_ON(IN_MOVED_FROM != FS_MOVED_FROM);
	BUILD_BUG_ON(IN_MOVED_TO != FS_MOVED_TO);
	BUILD_BUG_ON(IN_CREATE != FS_CREATE);
	BUILD_BUG_ON(IN_DELETE != FS_DELETE);
	BUILD_BUG_ON(IN_DELETE_SELF != FS_DELETE_SELF);
	BUILD_BUG_ON(IN_MOVE_SELF != FS_MOVE_SELF);
	BUILD_BUG_ON(IN_UNMOUNT != FS_UNMOUNT);
	BUILD_BUG_ON(IN_Q_OVERFLOW != FS_Q_OVERFLOW);
	BUILD_BUG_ON(IN_IGNORED != FS_IN_IGNORED);
	BUILD_BUG_ON(IN_EXCL_UNLINK != FS_EXCL_UNLINK);
	BUILD_BUG_ON(IN_ISDIR != FS_ISDIR);
	BUILD_BUG_ON(IN_ONESHOT != FS_IN_ONESHOT);

	BUG_ON(hweight32(ALL_INOTIFY_BITS) != 21);

	inotify_inode_mark_cachep = KMEM_CACHE(inotify_inode_mark, SLAB_PANIC);

	inotify_max_queued_events = 16384;
	inotify_max_user_instances = 128;
	inotify_max_user_watches = 8192;

	return 0;
}
module_init(inotify_user_setup);