/*
 * fs/inotify_user.c - inotify support for userspace
 *
 * Authors:
 *	John McCutchan	<ttb@tentacle.dhs.org>
 *	Robert Love	<rml@novell.com>
 *
 * Copyright (C) 2005 John McCutchan
 * Copyright 2006 Hewlett-Packard Development Company, L.P.
 *
 * Copyright (C) 2009 Eric Paris <Red Hat Inc>
 * inotify was largely rewritten to make use of the fsnotify infrastructure
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2, or (at your option) any
 * later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 */
#include <linux/file.h>
#include <linux/fs.h> /* struct inode */
#include <linux/fsnotify_backend.h>
#include <linux/idr.h>
#include <linux/init.h> /* fs_initcall */
#include <linux/inotify.h>
#include <linux/kernel.h> /* roundup() */
#include <linux/namei.h> /* LOOKUP_FOLLOW */
#include <linux/sched/signal.h>
#include <linux/slab.h> /* struct kmem_cache */
#include <linux/syscalls.h>
#include <linux/types.h>
#include <linux/anon_inodes.h>
#include <linux/uaccess.h>
#include <linux/poll.h>
#include <linux/wait.h>

#include "inotify.h"
#include "../fdinfo.h"

#include <asm/ioctls.h>
/* configurable via /proc/sys/fs/inotify/ */
static int inotify_max_queued_events __read_mostly;

struct kmem_cache *inotify_inode_mark_cachep __read_mostly;
#ifdef CONFIG_SYSCTL

#include <linux/sysctl.h>

struct ctl_table inotify_table[] = {
	{
		.procname	= "max_user_instances",
		.data		= &init_user_ns.ucount_max[UCOUNT_INOTIFY_INSTANCES],
		.maxlen		= sizeof(int),
		.proc_handler	= proc_dointvec_minmax,
	},
	{
		.procname	= "max_user_watches",
		.data		= &init_user_ns.ucount_max[UCOUNT_INOTIFY_WATCHES],
		.maxlen		= sizeof(int),
		.proc_handler	= proc_dointvec_minmax,
	},
	{
		.procname	= "max_queued_events",
		.data		= &inotify_max_queued_events,
		.maxlen		= sizeof(int),
		.proc_handler	= proc_dointvec_minmax,
	},
	{ }
};
#endif /* CONFIG_SYSCTL */
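
/*
 * Illustrative userspace sketch (not part of this file): reading one of the
 * tunables exposed by the table above. The path, buffer size, and error
 * handling are assumptions for the example only.
 *
 *	#include <fcntl.h>
 *	#include <stdio.h>
 *	#include <unistd.h>
 *
 *	int main(void)
 *	{
 *		char buf[32];
 *		int fd = open("/proc/sys/fs/inotify/max_user_watches", O_RDONLY);
 *		ssize_t n;
 *
 *		if (fd < 0)
 *			return 1;
 *		n = read(fd, buf, sizeof(buf) - 1);
 *		if (n > 0) {
 *			buf[n] = '\0';
 *			printf("max_user_watches: %s", buf);
 *		}
 *		close(fd);
 *		return 0;
 *	}
 */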
static inline __u32 inotify_arg_to_mask(u32 arg)
{
	__u32 mask;

	/*
	 * Everything should accept its own ignored event, care about
	 * children, and receive events when the inode is unmounted.
	 */
	mask = (FS_IN_IGNORED | FS_EVENT_ON_CHILD | FS_UNMOUNT);

	/* mask off the flags used to open the fd */
	mask |= (arg & (IN_ALL_EVENTS | IN_ONESHOT | IN_EXCL_UNLINK));

	return mask;
}

static inline u32 inotify_mask_to_arg(__u32 mask)
{
	return mask & (IN_ALL_EVENTS | IN_ISDIR | IN_UNMOUNT | IN_IGNORED |
		       IN_Q_OVERFLOW);
}
/* inotify userspace file descriptor functions */
static __poll_t inotify_poll(struct file *file, poll_table *wait)
{
	struct fsnotify_group *group = file->private_data;
	__poll_t ret = 0;

	poll_wait(file, &group->notification_waitq, wait);
	spin_lock(&group->notification_lock);
	if (!fsnotify_notify_queue_is_empty(group))
		ret = EPOLLIN | EPOLLRDNORM;
	spin_unlock(&group->notification_lock);

	return ret;
}
static int round_event_name_len(struct fsnotify_event *fsn_event)
{
	struct inotify_event_info *event;

	event = INOTIFY_E(fsn_event);
	if (!event->name_len)
		return 0;
	return roundup(event->name_len + 1, sizeof(struct inotify_event));
}
/*
 * Get an inotify_kernel_event if one exists and is small
 * enough to fit in "count". Return an error pointer if
 * not large enough.
 *
 * Called with the group->notification_lock held.
 */
static struct fsnotify_event *get_one_event(struct fsnotify_group *group,
					    size_t count)
{
	size_t event_size = sizeof(struct inotify_event);
	struct fsnotify_event *event;

	if (fsnotify_notify_queue_is_empty(group))
		return NULL;

	event = fsnotify_peek_first_event(group);

	pr_debug("%s: group=%p event=%p\n", __func__, group, event);

	event_size += round_event_name_len(event);
	if (event_size > count)
		return ERR_PTR(-EINVAL);

	/* held the notification_lock the whole time, so this is the
	 * same event we peeked above */
	fsnotify_remove_first_event(group);

	return event;
}
/*
 * Copy an event to user space, returning how much we copied.
 *
 * We already checked that the event size is smaller than the
 * buffer we had in "get_one_event()" above.
 */
static ssize_t copy_event_to_user(struct fsnotify_group *group,
				  struct fsnotify_event *fsn_event,
				  char __user *buf)
{
	struct inotify_event inotify_event;
	struct inotify_event_info *event;
	size_t event_size = sizeof(struct inotify_event);
	size_t name_len;
	size_t pad_name_len;

	pr_debug("%s: group=%p event=%p\n", __func__, group, fsn_event);

	event = INOTIFY_E(fsn_event);
	name_len = event->name_len;
	/*
	 * round up the name length (plus an extra byte for the terminating
	 * '\0') so it is a multiple of event_size
	 */
	pad_name_len = round_event_name_len(fsn_event);
	inotify_event.len = pad_name_len;
	inotify_event.mask = inotify_mask_to_arg(fsn_event->mask);
	inotify_event.wd = event->wd;
	inotify_event.cookie = event->sync_cookie;

	/* send the main event */
	if (copy_to_user(buf, &inotify_event, event_size))
		return -EFAULT;

	buf += event_size;

	/*
	 * fsnotify only stores the pathname, so here we have to send the pathname
	 * and then pad that pathname out to a multiple of sizeof(inotify_event)
	 * with zeros.
	 */
	if (pad_name_len) {
		/* copy the path name */
		if (copy_to_user(buf, event->name, name_len))
			return -EFAULT;
		buf += name_len;

		/* fill userspace with 0's */
		if (clear_user(buf, pad_name_len - name_len))
			return -EFAULT;
		event_size += pad_name_len;
	}

	return event_size;
}
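
/*
 * Illustrative userspace sketch (not part of this file): consuming the
 * variable-length records produced by copy_event_to_user() above. Each
 * record is a struct inotify_event followed by "len" bytes of name. The
 * buffer size and helper name are assumptions for the example only.
 *
 *	#include <stdio.h>
 *	#include <sys/inotify.h>
 *	#include <unistd.h>
 *
 *	static void drain_events(int inotify_fd)
 *	{
 *		char buf[4096];
 *		ssize_t len = read(inotify_fd, buf, sizeof(buf));
 *		char *p;
 *
 *		for (p = buf; len > 0 && p < buf + len; ) {
 *			const struct inotify_event *ev = (const void *)p;
 *
 *			printf("wd=%d mask=%#x name=%s\n", ev->wd, ev->mask,
 *			       ev->len ? ev->name : "");
 *			p += sizeof(*ev) + ev->len;
 *		}
 *	}
 */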
static ssize_t inotify_read(struct file *file, char __user *buf,
			    size_t count, loff_t *pos)
{
	struct fsnotify_group *group;
	struct fsnotify_event *kevent;
	char __user *start;
	int ret;
	DEFINE_WAIT_FUNC(wait, woken_wake_function);

	start = buf;
	group = file->private_data;

	add_wait_queue(&group->notification_waitq, &wait);
	while (1) {
		spin_lock(&group->notification_lock);
		kevent = get_one_event(group, count);
		spin_unlock(&group->notification_lock);

		pr_debug("%s: group=%p kevent=%p\n", __func__, group, kevent);

		if (kevent) {
			ret = PTR_ERR(kevent);
			if (IS_ERR(kevent))
				break;
			ret = copy_event_to_user(group, kevent, buf);
			fsnotify_destroy_event(group, kevent);
			if (ret < 0)
				break;
			buf += ret;
			count -= ret;
			continue;
		}

		ret = -EAGAIN;
		if (file->f_flags & O_NONBLOCK)
			break;
		ret = -ERESTARTSYS;
		if (signal_pending(current))
			break;

		if (start != buf)
			break;

		wait_woken(&wait, TASK_INTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT);
	}
	remove_wait_queue(&group->notification_waitq, &wait);

	if (start != buf && ret != -EFAULT)
		ret = buf - start;
	return ret;
}
static int inotify_release(struct inode *ignored, struct file *file)
{
	struct fsnotify_group *group = file->private_data;

	pr_debug("%s: group=%p\n", __func__, group);

	/* free this group, matching get was inotify_init->fsnotify_obtain_group */
	fsnotify_destroy_group(group);

	return 0;
}
static long inotify_ioctl(struct file *file, unsigned int cmd,
			  unsigned long arg)
{
	struct fsnotify_group *group;
	struct fsnotify_event *fsn_event;
	void __user *p;
	int ret = -ENOTTY;
	size_t send_len = 0;

	group = file->private_data;
	p = (void __user *) arg;

	pr_debug("%s: group=%p cmd=%u\n", __func__, group, cmd);

	switch (cmd) {
	case FIONREAD:
		spin_lock(&group->notification_lock);
		list_for_each_entry(fsn_event, &group->notification_list,
				    list) {
			send_len += sizeof(struct inotify_event);
			send_len += round_event_name_len(fsn_event);
		}
		spin_unlock(&group->notification_lock);
		ret = put_user(send_len, (int __user *) p);
		break;
#ifdef CONFIG_CHECKPOINT_RESTORE
	case INOTIFY_IOC_SETNEXTWD:
		ret = -EINVAL;
		if (arg >= 1 && arg <= INT_MAX) {
			struct inotify_group_private_data *data;

			data = &group->inotify_data;
			spin_lock(&data->idr_lock);
			idr_set_cursor(&data->idr, (unsigned int)arg);
			spin_unlock(&data->idr_lock);
			ret = 0;
		}
		break;
#endif /* CONFIG_CHECKPOINT_RESTORE */
	}

	return ret;
}
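
/*
 * Illustrative userspace sketch (not part of this file): the FIONREAD case
 * above lets a reader size its buffer before calling read(). The descriptor
 * name is an assumption for the example only.
 *
 *	#include <sys/ioctl.h>
 *
 *	int pending = 0;
 *
 *	if (ioctl(inotify_fd, FIONREAD, &pending) == 0 && pending > 0) {
 *		// "pending" bytes of inotify_event records are queued
 *	}
 */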
static const struct file_operations inotify_fops = {
	.show_fdinfo	= inotify_show_fdinfo,
	.poll		= inotify_poll,
	.read		= inotify_read,
	.fasync		= fsnotify_fasync,
	.release	= inotify_release,
	.unlocked_ioctl	= inotify_ioctl,
	.compat_ioctl	= inotify_ioctl,
	.llseek		= noop_llseek,
};
/*
 * inotify_find_inode - resolve a user-given path to a specific inode
 */
static int inotify_find_inode(const char __user *dirname, struct path *path,
			      unsigned flags)
{
	int error;

	error = user_path_at(AT_FDCWD, dirname, flags, path);
	if (error)
		return error;
	/* you can only watch an inode if you have read permissions on it */
	error = inode_permission(path->dentry->d_inode, MAY_READ);
	if (error)
		path_put(path);
	return error;
}
static int inotify_add_to_idr(struct idr *idr, spinlock_t *idr_lock,
			      struct inotify_inode_mark *i_mark)
{
	int ret;

	idr_preload(GFP_KERNEL);
	spin_lock(idr_lock);

	ret = idr_alloc_cyclic(idr, i_mark, 1, 0, GFP_NOWAIT);
	if (ret >= 0) {
		/* we added the mark to the idr, take a reference */
		i_mark->wd = ret;
		fsnotify_get_mark(&i_mark->fsn_mark);
	}

	spin_unlock(idr_lock);
	idr_preload_end();
	return ret < 0 ? ret : 0;
}
static struct inotify_inode_mark *inotify_idr_find_locked(struct fsnotify_group *group,
							   int wd)
{
	struct idr *idr = &group->inotify_data.idr;
	spinlock_t *idr_lock = &group->inotify_data.idr_lock;
	struct inotify_inode_mark *i_mark;

	assert_spin_locked(idr_lock);

	i_mark = idr_find(idr, wd);
	if (i_mark) {
		struct fsnotify_mark *fsn_mark = &i_mark->fsn_mark;

		fsnotify_get_mark(fsn_mark);
		/* One ref for being in the idr, one ref we just took */
		BUG_ON(refcount_read(&fsn_mark->refcnt) < 2);
	}

	return i_mark;
}
static struct inotify_inode_mark *inotify_idr_find(struct fsnotify_group *group,
						   int wd)
{
	struct inotify_inode_mark *i_mark;
	spinlock_t *idr_lock = &group->inotify_data.idr_lock;

	spin_lock(idr_lock);
	i_mark = inotify_idr_find_locked(group, wd);
	spin_unlock(idr_lock);

	return i_mark;
}
/*
 * Remove the mark from the idr (if present) and drop the reference
 * on the mark because it was in the idr.
 */
static void inotify_remove_from_idr(struct fsnotify_group *group,
				    struct inotify_inode_mark *i_mark)
{
	struct idr *idr = &group->inotify_data.idr;
	spinlock_t *idr_lock = &group->inotify_data.idr_lock;
	struct inotify_inode_mark *found_i_mark = NULL;
	int wd;

	spin_lock(idr_lock);
	wd = i_mark->wd;

	/*
	 * does this i_mark think it is in the idr? we shouldn't get called
	 * unless it thinks it is in.
	 */
	if (wd == -1) {
		WARN_ONCE(1, "%s: i_mark=%p i_mark->wd=%d i_mark->group=%p\n",
			__func__, i_mark, i_mark->wd, i_mark->fsn_mark.group);
		goto out;
	}

	/* Let's look in the idr to see if we find it */
	found_i_mark = inotify_idr_find_locked(group, wd);
	if (unlikely(!found_i_mark)) {
		WARN_ONCE(1, "%s: i_mark=%p i_mark->wd=%d i_mark->group=%p\n",
			__func__, i_mark, i_mark->wd, i_mark->fsn_mark.group);
		goto out;
	}

	/*
	 * We found a mark in the idr at the right wd, but it's
	 * not the mark we were told to remove.  eparis seriously
	 * fucked up somewhere.
	 */
	if (unlikely(found_i_mark != i_mark)) {
		WARN_ONCE(1, "%s: i_mark=%p i_mark->wd=%d i_mark->group=%p "
			"found_i_mark=%p found_i_mark->wd=%d "
			"found_i_mark->group=%p\n", __func__, i_mark,
			i_mark->wd, i_mark->fsn_mark.group, found_i_mark,
			found_i_mark->wd, found_i_mark->fsn_mark.group);
		goto out;
	}

	/*
	 * One ref for being in the idr
	 * one ref grabbed by inotify_idr_find
	 */
	if (unlikely(refcount_read(&i_mark->fsn_mark.refcnt) < 2)) {
		printk(KERN_ERR "%s: i_mark=%p i_mark->wd=%d i_mark->group=%p\n",
			__func__, i_mark, i_mark->wd, i_mark->fsn_mark.group);
		/* we can't really recover with bad ref counting... */
		BUG();
	}

	idr_remove(idr, wd);
	/* Removed from the idr, drop that ref. */
	fsnotify_put_mark(&i_mark->fsn_mark);
out:
	i_mark->wd = -1;
	spin_unlock(idr_lock);
	/* match the ref taken by inotify_idr_find_locked() */
	if (found_i_mark)
		fsnotify_put_mark(&found_i_mark->fsn_mark);
}
/*
 * Send IN_IGNORED for this wd, remove this wd from the idr.
 */
void inotify_ignored_and_remove_idr(struct fsnotify_mark *fsn_mark,
				    struct fsnotify_group *group)
{
	struct inotify_inode_mark *i_mark;
	struct fsnotify_iter_info iter_info = { };

	fsnotify_iter_set_report_type_mark(&iter_info, FSNOTIFY_OBJ_TYPE_INODE,
					   fsn_mark);

	/* Queue ignore event for the watch */
	inotify_handle_event(group, NULL, FS_IN_IGNORED, NULL,
			     FSNOTIFY_EVENT_NONE, NULL, 0, &iter_info);

	i_mark = container_of(fsn_mark, struct inotify_inode_mark, fsn_mark);
	/* remove this mark from the idr */
	inotify_remove_from_idr(group, i_mark);

	dec_inotify_watches(group->inotify_data.ucounts);
}
static int inotify_update_existing_watch(struct fsnotify_group *group,
					 struct inode *inode,
					 u32 arg)
{
	struct fsnotify_mark *fsn_mark;
	struct inotify_inode_mark *i_mark;
	__u32 old_mask, new_mask;
	__u32 mask;
	int add = (arg & IN_MASK_ADD);
	int ret;

	mask = inotify_arg_to_mask(arg);

	fsn_mark = fsnotify_find_mark(&inode->i_fsnotify_marks, group);
	if (!fsn_mark)
		return -ENOENT;

	i_mark = container_of(fsn_mark, struct inotify_inode_mark, fsn_mark);

	spin_lock(&fsn_mark->lock);
	old_mask = fsn_mark->mask;
	if (add)
		fsn_mark->mask |= mask;
	else
		fsn_mark->mask = mask;
	new_mask = fsn_mark->mask;
	spin_unlock(&fsn_mark->lock);

	if (old_mask != new_mask) {
		/* more bits in old than in new? */
		int dropped = (old_mask & ~new_mask);
		/* more bits in this fsn_mark than the inode's mask? */
		int do_inode = (new_mask & ~inode->i_fsnotify_mask);

		/* update the inode with this new fsn_mark */
		if (dropped || do_inode)
			fsnotify_recalc_mask(inode->i_fsnotify_marks);
	}

	/* return the wd */
	ret = i_mark->wd;

	/* match the get from fsnotify_find_mark() */
	fsnotify_put_mark(fsn_mark);

	return ret;
}
static int inotify_new_watch(struct fsnotify_group *group,
			     struct inode *inode,
			     u32 arg)
{
	struct inotify_inode_mark *tmp_i_mark;
	__u32 mask;
	int ret;
	struct idr *idr = &group->inotify_data.idr;
	spinlock_t *idr_lock = &group->inotify_data.idr_lock;

	mask = inotify_arg_to_mask(arg);

	tmp_i_mark = kmem_cache_alloc(inotify_inode_mark_cachep, GFP_KERNEL);
	if (unlikely(!tmp_i_mark))
		return -ENOMEM;

	fsnotify_init_mark(&tmp_i_mark->fsn_mark, group);
	tmp_i_mark->fsn_mark.mask = mask;
	tmp_i_mark->wd = -1;

	ret = inotify_add_to_idr(idr, idr_lock, tmp_i_mark);
	if (ret)
		goto out_err;

	/* increment the number of watches the user has */
	if (!inc_inotify_watches(group->inotify_data.ucounts)) {
		inotify_remove_from_idr(group, tmp_i_mark);
		ret = -ENOSPC;
		goto out_err;
	}

	/* we are on the idr, now get on the inode */
	ret = fsnotify_add_inode_mark_locked(&tmp_i_mark->fsn_mark, inode, 0);
	if (ret) {
		/* we failed to get on the inode, get off the idr */
		inotify_remove_from_idr(group, tmp_i_mark);
		goto out_err;
	}

	/* return the watch descriptor for this new mark */
	ret = tmp_i_mark->wd;

out_err:
	/* match the ref from fsnotify_init_mark() */
	fsnotify_put_mark(&tmp_i_mark->fsn_mark);

	return ret;
}
static int inotify_update_watch(struct fsnotify_group *group, struct inode *inode, u32 arg)
{
	int ret = 0;

	mutex_lock(&group->mark_mutex);
	/* try to update an existing watch with the new arg */
	ret = inotify_update_existing_watch(group, inode, arg);
	/* no mark present, try to add a new one */
	if (ret == -ENOENT)
		ret = inotify_new_watch(group, inode, arg);
	mutex_unlock(&group->mark_mutex);

	return ret;
}
static struct fsnotify_group *inotify_new_group(unsigned int max_events)
{
	struct fsnotify_group *group;
	struct inotify_event_info *oevent;

	group = fsnotify_alloc_group(&inotify_fsnotify_ops);
	if (IS_ERR(group))
		return group;

	oevent = kmalloc(sizeof(struct inotify_event_info), GFP_KERNEL);
	if (unlikely(!oevent)) {
		fsnotify_destroy_group(group);
		return ERR_PTR(-ENOMEM);
	}
	group->overflow_event = &oevent->fse;
	fsnotify_init_event(group->overflow_event, NULL, FS_Q_OVERFLOW);
	oevent->wd = -1;
	oevent->sync_cookie = 0;
	oevent->name_len = 0;

	group->max_events = max_events;

	spin_lock_init(&group->inotify_data.idr_lock);
	idr_init(&group->inotify_data.idr);
	group->inotify_data.ucounts = inc_ucount(current_user_ns(),
						 current_euid(),
						 UCOUNT_INOTIFY_INSTANCES);

	if (!group->inotify_data.ucounts) {
		fsnotify_destroy_group(group);
		return ERR_PTR(-EMFILE);
	}

	return group;
}
/* inotify syscalls */
static int do_inotify_init(int flags)
{
	struct fsnotify_group *group;
	int ret;

	/* Check the IN_* constants for consistency.  */
	BUILD_BUG_ON(IN_CLOEXEC != O_CLOEXEC);
	BUILD_BUG_ON(IN_NONBLOCK != O_NONBLOCK);

	if (flags & ~(IN_CLOEXEC | IN_NONBLOCK))
		return -EINVAL;

	/* fsnotify_obtain_group took a reference to group, we put this when we kill the file in the end */
	group = inotify_new_group(inotify_max_queued_events);
	if (IS_ERR(group))
		return PTR_ERR(group);

	ret = anon_inode_getfd("inotify", &inotify_fops, group,
			       O_RDONLY | flags);
	if (ret < 0)
		fsnotify_destroy_group(group);

	return ret;
}

SYSCALL_DEFINE1(inotify_init1, int, flags)
{
	return do_inotify_init(flags);
}

SYSCALL_DEFINE0(inotify_init)
{
	return do_inotify_init(0);
}
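
/*
 * Illustrative userspace sketch (not part of this file): creating an inotify
 * instance through the syscalls defined above, via the glibc wrapper. Flag
 * choice and error handling are assumptions for the example only.
 *
 *	#include <stdio.h>
 *	#include <sys/inotify.h>
 *	#include <unistd.h>
 *
 *	int main(void)
 *	{
 *		int fd = inotify_init1(IN_NONBLOCK | IN_CLOEXEC);
 *
 *		if (fd < 0) {
 *			perror("inotify_init1");
 *			return 1;
 *		}
 *		printf("inotify fd: %d\n", fd);
 *		close(fd);
 *		return 0;
 *	}
 */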
SYSCALL_DEFINE3(inotify_add_watch, int, fd, const char __user *, pathname,
		u32, mask)
{
	struct fsnotify_group *group;
	struct inode *inode;
	struct path path;
	struct fd f;
	int ret;
	unsigned flags = 0;

	/*
	 * We share a lot of code with fs/dnotify.  We also share
	 * the bit layout between inotify's IN_* and the fsnotify
	 * FS_*.  This check ensures that only the inotify IN_*
	 * bits get passed in and set in watches/events.
	 */
	if (unlikely(mask & ~ALL_INOTIFY_BITS))
		return -EINVAL;
	/*
	 * Require at least one valid bit set in the mask.
	 * Without _something_ set, we would have no events to
	 * watch for.
	 */
	if (unlikely(!(mask & ALL_INOTIFY_BITS)))
		return -EINVAL;

	f = fdget(fd);
	if (unlikely(!f.file))
		return -EBADF;

	/* verify that this is indeed an inotify instance */
	if (unlikely(f.file->f_op != &inotify_fops)) {
		ret = -EINVAL;
		goto fput_and_out;
	}

	if (!(mask & IN_DONT_FOLLOW))
		flags |= LOOKUP_FOLLOW;
	if (mask & IN_ONLYDIR)
		flags |= LOOKUP_DIRECTORY;

	ret = inotify_find_inode(pathname, &path, flags);
	if (ret)
		goto fput_and_out;

	/* inode held in place by reference to path; group by fget on fd */
	inode = path.dentry->d_inode;
	group = f.file->private_data;

	/* create/update an inode mark */
	ret = inotify_update_watch(group, inode, mask);
	path_put(&path);
fput_and_out:
	fdput(f);
	return ret;
}
SYSCALL_DEFINE2(inotify_rm_watch, int, fd, __s32, wd)
{
	struct fsnotify_group *group;
	struct inotify_inode_mark *i_mark;
	struct fd f;
	int ret = -EINVAL;

	f = fdget(fd);
	if (unlikely(!f.file))
		return -EBADF;

	/* verify that this is indeed an inotify instance */
	if (unlikely(f.file->f_op != &inotify_fops))
		goto out;

	group = f.file->private_data;

	i_mark = inotify_idr_find(group, wd);
	if (unlikely(!i_mark))
		goto out;

	ret = 0;

	fsnotify_destroy_mark(&i_mark->fsn_mark, group);

	/* match ref taken by inotify_idr_find */
	fsnotify_put_mark(&i_mark->fsn_mark);

out:
	fdput(f);
	return ret;
}
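
/*
 * Illustrative userspace sketch (not part of this file): pairing
 * inotify_add_watch() and inotify_rm_watch() from userspace. The watched
 * path and mask are assumptions for the example only.
 *
 *	#include <stdio.h>
 *	#include <sys/inotify.h>
 *
 *	static int watch_tmp(int inotify_fd)
 *	{
 *		int wd = inotify_add_watch(inotify_fd, "/tmp",
 *					   IN_CREATE | IN_DELETE);
 *
 *		if (wd < 0) {
 *			perror("inotify_add_watch");
 *			return -1;
 *		}
 *		// ... read() events here ...
 *		inotify_rm_watch(inotify_fd, wd);
 *		return 0;
 *	}
 */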
/*
 * inotify_user_setup - Our initialization function.  Note that we cannot return
 * error because we have compiled-in VFS hooks.  So an (unlikely) failure here
 * must result in panic().
 */
static int __init inotify_user_setup(void)
{
	BUILD_BUG_ON(IN_ACCESS != FS_ACCESS);
	BUILD_BUG_ON(IN_MODIFY != FS_MODIFY);
	BUILD_BUG_ON(IN_ATTRIB != FS_ATTRIB);
	BUILD_BUG_ON(IN_CLOSE_WRITE != FS_CLOSE_WRITE);
	BUILD_BUG_ON(IN_CLOSE_NOWRITE != FS_CLOSE_NOWRITE);
	BUILD_BUG_ON(IN_OPEN != FS_OPEN);
	BUILD_BUG_ON(IN_MOVED_FROM != FS_MOVED_FROM);
	BUILD_BUG_ON(IN_MOVED_TO != FS_MOVED_TO);
	BUILD_BUG_ON(IN_CREATE != FS_CREATE);
	BUILD_BUG_ON(IN_DELETE != FS_DELETE);
	BUILD_BUG_ON(IN_DELETE_SELF != FS_DELETE_SELF);
	BUILD_BUG_ON(IN_MOVE_SELF != FS_MOVE_SELF);
	BUILD_BUG_ON(IN_UNMOUNT != FS_UNMOUNT);
	BUILD_BUG_ON(IN_Q_OVERFLOW != FS_Q_OVERFLOW);
	BUILD_BUG_ON(IN_IGNORED != FS_IN_IGNORED);
	BUILD_BUG_ON(IN_EXCL_UNLINK != FS_EXCL_UNLINK);
	BUILD_BUG_ON(IN_ISDIR != FS_ISDIR);
	BUILD_BUG_ON(IN_ONESHOT != FS_IN_ONESHOT);

	BUG_ON(hweight32(ALL_INOTIFY_BITS) != 21);

	inotify_inode_mark_cachep = KMEM_CACHE(inotify_inode_mark, SLAB_PANIC);

	inotify_max_queued_events = 16384;
	init_user_ns.ucount_max[UCOUNT_INOTIFY_INSTANCES] = 128;
	init_user_ns.ucount_max[UCOUNT_INOTIFY_WATCHES] = 8192;

	return 0;
}
fs_initcall(inotify_user_setup);