/*
 * fs/inotify_user.c - inotify support for userspace
 *
 * Authors:
 *	John McCutchan	<ttb@tentacle.dhs.org>
 *	Robert Love	<rml@novell.com>
 *
 * Copyright (C) 2005 John McCutchan
 * Copyright 2006 Hewlett-Packard Development Company, L.P.
 *
 * Copyright (C) 2009 Eric Paris <Red Hat Inc>
 * inotify was largely rewritten to make use of the fsnotify infrastructure
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2, or (at your option) any
 * later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 */

#include <linux/file.h>
#include <linux/fs.h> /* struct inode */
#include <linux/fsnotify_backend.h>
#include <linux/idr.h>
#include <linux/init.h> /* module_init */
#include <linux/inotify.h>
#include <linux/kernel.h> /* roundup() */
#include <linux/magic.h> /* superblock magic number */
#include <linux/mount.h> /* mntget */
#include <linux/namei.h> /* LOOKUP_FOLLOW */
#include <linux/path.h> /* struct path */
#include <linux/sched.h> /* struct user */
#include <linux/slab.h> /* struct kmem_cache */
#include <linux/syscalls.h>
#include <linux/types.h>
#include <linux/uaccess.h>
#include <linux/poll.h>
#include <linux/wait.h>

#include "inotify.h"

#include <asm/ioctls.h>

static struct vfsmount *inotify_mnt __read_mostly;

/* these are configurable via /proc/sys/fs/inotify/ */
static int inotify_max_user_instances __read_mostly;
static int inotify_max_queued_events __read_mostly;
int inotify_max_user_watches __read_mostly;

static struct kmem_cache *inotify_inode_mark_cachep __read_mostly;
struct kmem_cache *event_priv_cachep __read_mostly;

#ifdef CONFIG_SYSCTL

#include <linux/sysctl.h>

static int zero;

ctl_table inotify_table[] = {
	{
		.ctl_name	= INOTIFY_MAX_USER_INSTANCES,
		.procname	= "max_user_instances",
		.data		= &inotify_max_user_instances,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= &proc_dointvec_minmax,
		.strategy	= &sysctl_intvec,
		.extra1		= &zero,
	},
	{
		.ctl_name	= INOTIFY_MAX_USER_WATCHES,
		.procname	= "max_user_watches",
		.data		= &inotify_max_user_watches,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= &proc_dointvec_minmax,
		.strategy	= &sysctl_intvec,
		.extra1		= &zero,
	},
	{
		.ctl_name	= INOTIFY_MAX_QUEUED_EVENTS,
		.procname	= "max_queued_events",
		.data		= &inotify_max_queued_events,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= &proc_dointvec_minmax,
		.strategy	= &sysctl_intvec,
		.extra1		= &zero
	},
	{ .ctl_name = 0 }
};
#endif /* CONFIG_SYSCTL */
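
/*
 * Convert the userspace IN_* event bits into the internal FS_* mask used
 * by fsnotify.  Every inotify watch implicitly cares about IN_IGNORED and
 * about events generated on children.
 */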

static inline __u32 inotify_arg_to_mask(u32 arg)
{
	__u32 mask;

	/* everything should accept their own ignored and cares about children */
	mask = (FS_IN_IGNORED | FS_EVENT_ON_CHILD);

	/* mask off the flags used to open the fd */
	mask |= (arg & (IN_ALL_EVENTS | IN_ONESHOT));

	return mask;
}
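
/*
 * Convert an internal fsnotify mask back into the IN_* bits reported to
 * userspace, keeping only the bits inotify actually exposes.
 */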

static inline u32 inotify_mask_to_arg(__u32 mask)
{
	return mask & (IN_ALL_EVENTS | IN_ISDIR | IN_UNMOUNT | IN_IGNORED |
		       IN_Q_OVERFLOW);
}

/* inotify userspace file descriptor functions */
static unsigned int inotify_poll(struct file *file, poll_table *wait)
{
	struct fsnotify_group *group = file->private_data;
	int ret = 0;

	poll_wait(file, &group->notification_waitq, wait);
	mutex_lock(&group->notification_mutex);
	if (!fsnotify_notify_queue_is_empty(group))
		ret = POLLIN | POLLRDNORM;
	mutex_unlock(&group->notification_mutex);

	return ret;
}

/*
 * Get an inotify_kernel_event if one exists and is small
 * enough to fit in "count". Return an error pointer if
 * not large enough.
 *
 * Called with the group->notification_mutex held.
 */
static struct fsnotify_event *get_one_event(struct fsnotify_group *group,
					    size_t count)
{
	size_t event_size = sizeof(struct inotify_event);
	struct fsnotify_event *event;

	if (fsnotify_notify_queue_is_empty(group))
		return NULL;

	event = fsnotify_peek_notify_event(group);

	if (event->name_len)
		event_size += roundup(event->name_len + 1, event_size);

	if (event_size > count)
		return ERR_PTR(-EINVAL);

	/* held the notification_mutex the whole time, so this is the
	 * same event we peeked above */
	fsnotify_remove_notify_event(group);

	return event;
}

/*
 * Copy an event to user space, returning how much we copied.
 *
 * We already checked that the event size is smaller than the
 * buffer we had in "get_one_event()" above.
 */
static ssize_t copy_event_to_user(struct fsnotify_group *group,
				  struct fsnotify_event *event,
				  char __user *buf)
{
	struct inotify_event inotify_event;
	struct fsnotify_event_private_data *fsn_priv;
	struct inotify_event_private_data *priv;
	size_t event_size = sizeof(struct inotify_event);
	size_t name_len = 0;

	/* we get the inotify watch descriptor from the event private data */
	spin_lock(&event->lock);
	fsn_priv = fsnotify_remove_priv_from_event(group, event);
	spin_unlock(&event->lock);

	if (!fsn_priv)
		inotify_event.wd = -1;
	else {
		priv = container_of(fsn_priv, struct inotify_event_private_data,
				    fsnotify_event_priv_data);
		inotify_event.wd = priv->wd;
		inotify_free_event_priv(fsn_priv);
	}

	/*
	 * round up event->name_len so it is a multiple of event_size
	 * plus an extra byte for the terminating '\0'.
	 */
	if (event->name_len)
		name_len = roundup(event->name_len + 1, event_size);
	inotify_event.len = name_len;

	inotify_event.mask = inotify_mask_to_arg(event->mask);
	inotify_event.cookie = event->sync_cookie;

	/* send the main event */
	if (copy_to_user(buf, &inotify_event, event_size))
		return -EFAULT;

	buf += event_size;

	/*
	 * fsnotify only stores the pathname, so here we have to send the pathname
	 * and then pad that pathname out to a multiple of sizeof(inotify_event)
	 * with zeros.
	 */
	if (name_len) {
		unsigned int len_to_zero = name_len - event->name_len;
		/* copy the path name */
		if (copy_to_user(buf, event->file_name, event->name_len))
			return -EFAULT;
		buf += event->name_len;

		/* fill userspace with 0's */
		if (clear_user(buf, len_to_zero))
			return -EFAULT;
		buf += len_to_zero;
		event_size += name_len;
	}

	return event_size;
}
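
/*
 * Drain queued events into the userspace buffer.  Blocks (unless the fd
 * is O_NONBLOCK) until at least one event has been copied, and returns
 * the number of bytes written or a negative error.
 */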

static ssize_t inotify_read(struct file *file, char __user *buf,
			    size_t count, loff_t *pos)
{
	struct fsnotify_group *group;
	struct fsnotify_event *kevent;
	char __user *start;
	int ret;
	DEFINE_WAIT(wait);

	start = buf;
	group = file->private_data;

	while (1) {
		prepare_to_wait(&group->notification_waitq, &wait, TASK_INTERRUPTIBLE);

		mutex_lock(&group->notification_mutex);
		kevent = get_one_event(group, count);
		mutex_unlock(&group->notification_mutex);

		if (kevent) {
			ret = PTR_ERR(kevent);
			if (IS_ERR(kevent))
				break;
			ret = copy_event_to_user(group, kevent, buf);
			fsnotify_put_event(kevent);
			if (ret < 0)
				break;
			buf += ret;
			count -= ret;
			continue;
		}

		ret = -EAGAIN;
		if (file->f_flags & O_NONBLOCK)
			break;
		ret = -EINTR;
		if (signal_pending(current))
			break;

		if (start != buf)
			break;

		schedule();
	}

	finish_wait(&group->notification_waitq, &wait);
	if (start != buf && ret != -EFAULT)
		ret = buf - start;
	return ret;
}
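
/* wire this inotify instance up for SIGIO delivery via fasync_helper() */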

static int inotify_fasync(int fd, struct file *file, int on)
{
	struct fsnotify_group *group = file->private_data;

	return fasync_helper(fd, file, on, &group->inotify_data.fa) >= 0 ? 0 : -EIO;
}
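
/*
 * Final fput() on an inotify fd: tear down every mark owned by this group,
 * drop the group reference and decrement the per-user instance count.
 */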

static int inotify_release(struct inode *ignored, struct file *file)
{
	struct fsnotify_group *group = file->private_data;
	struct user_struct *user = group->inotify_data.user;

	fsnotify_clear_marks_by_group(group);

	/* free this group, matching get was inotify_init->fsnotify_obtain_group */
	fsnotify_put_group(group);

	atomic_dec(&user->inotify_devs);

	return 0;
}
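
/*
 * Only FIONREAD is implemented: report how many bytes a read() would
 * currently return for this inotify instance.
 */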

static long inotify_ioctl(struct file *file, unsigned int cmd,
			  unsigned long arg)
{
	struct fsnotify_group *group;
	struct fsnotify_event_holder *holder;
	struct fsnotify_event *event;
	void __user *p;
	int ret = -ENOTTY;
	size_t send_len = 0;

	group = file->private_data;
	p = (void __user *) arg;

	switch (cmd) {
	case FIONREAD:
		mutex_lock(&group->notification_mutex);
		list_for_each_entry(holder, &group->notification_list, event_list) {
			event = holder->event;
			send_len += sizeof(struct inotify_event);
			if (event->name_len)
				send_len += roundup(event->name_len + 1,
						    sizeof(struct inotify_event));
		}
		mutex_unlock(&group->notification_mutex);
		ret = put_user(send_len, (int __user *) p);
		break;
	}

	return ret;
}

static const struct file_operations inotify_fops = {
	.poll		= inotify_poll,
	.read		= inotify_read,
	.fasync		= inotify_fasync,
	.release	= inotify_release,
	.unlocked_ioctl	= inotify_ioctl,
	.compat_ioctl	= inotify_ioctl,
};

/*
 * inotify_find_inode - resolve a user-given path to a specific inode
 */
static int inotify_find_inode(const char __user *dirname, struct path *path, unsigned flags)
{
	int error;

	error = user_path_at(AT_FDCWD, dirname, flags, path);
	if (error)
		return error;
	/* you can only watch an inode if you have read permissions on it */
	error = inode_permission(path->dentry->d_inode, MAY_READ);
	if (error)
		path_put(path);
	return error;
}
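
/*
 * Allocate a watch descriptor above last_wd for this mark and take a
 * reference on the mark on behalf of the idr's pointer to it.
 */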

static int inotify_add_to_idr(struct idr *idr, spinlock_t *idr_lock,
			      int last_wd,
			      struct inotify_inode_mark_entry *ientry)
{
	int ret;

	do {
		if (unlikely(!idr_pre_get(idr, GFP_KERNEL)))
			return -ENOMEM;

		spin_lock(idr_lock);
		ret = idr_get_new_above(idr, ientry, last_wd + 1,
					&ientry->wd);
		/* we added the mark to the idr, take a reference */
		if (!ret)
			fsnotify_get_mark(&ientry->fsn_entry);
		spin_unlock(idr_lock);
	} while (ret == -EAGAIN);

	return ret;
}
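
/*
 * Look up a mark by watch descriptor.  The caller holds idr_lock; we take
 * a reference on any mark we return.
 */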

static struct inotify_inode_mark_entry *inotify_idr_find_locked(struct fsnotify_group *group,
								int wd)
{
	struct idr *idr = &group->inotify_data.idr;
	spinlock_t *idr_lock = &group->inotify_data.idr_lock;
	struct inotify_inode_mark_entry *ientry;

	assert_spin_locked(idr_lock);

	ientry = idr_find(idr, wd);
	if (ientry) {
		struct fsnotify_mark_entry *fsn_entry = &ientry->fsn_entry;

		fsnotify_get_mark(fsn_entry);
		/* One ref for being in the idr, one ref we just took */
		BUG_ON(atomic_read(&fsn_entry->refcnt) < 2);
	}

	return ientry;
}
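
/* locking wrapper around inotify_idr_find_locked() */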

static struct inotify_inode_mark_entry *inotify_idr_find(struct fsnotify_group *group,
							  int wd)
{
	struct inotify_inode_mark_entry *ientry;
	spinlock_t *idr_lock = &group->inotify_data.idr_lock;

	spin_lock(idr_lock);
	ientry = inotify_idr_find_locked(group, wd);
	spin_unlock(idr_lock);

	return ientry;
}
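
/*
 * Actually remove the watch descriptor from the idr and drop the reference
 * the idr held on the mark.  The caller holds idr_lock.
 */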

static void do_inotify_remove_from_idr(struct fsnotify_group *group,
				       struct inotify_inode_mark_entry *ientry)
{
	struct idr *idr = &group->inotify_data.idr;
	spinlock_t *idr_lock = &group->inotify_data.idr_lock;
	int wd = ientry->wd;

	assert_spin_locked(idr_lock);

	idr_remove(idr, wd);

	/* removed from the idr, drop that ref */
	fsnotify_put_mark(&ientry->fsn_entry);
}

/*
 * Remove the mark from the idr (if present) and drop the reference
 * on the mark because it was in the idr.
 */
static void inotify_remove_from_idr(struct fsnotify_group *group,
				    struct inotify_inode_mark_entry *ientry)
{
	spinlock_t *idr_lock = &group->inotify_data.idr_lock;
	struct inotify_inode_mark_entry *found_ientry = NULL;
	int wd;

	spin_lock(idr_lock);
	wd = ientry->wd;

	/*
	 * does this ientry think it is in the idr?  we shouldn't get called
	 * if it wasn't....
	 */
	if (wd == -1) {
		printk(KERN_WARNING "%s: ientry=%p ientry->wd=%d ientry->group=%p"
			" ientry->inode=%p\n", __func__, ientry, ientry->wd,
			ientry->fsn_entry.group, ientry->fsn_entry.inode);
		WARN_ON(1);
		goto out;
	}

	/* Let's look in the idr to see if we find it */
	found_ientry = inotify_idr_find_locked(group, wd);
	if (unlikely(!found_ientry)) {
		printk(KERN_WARNING "%s: ientry=%p ientry->wd=%d ientry->group=%p"
			" ientry->inode=%p\n", __func__, ientry, ientry->wd,
			ientry->fsn_entry.group, ientry->fsn_entry.inode);
		WARN_ON(1);
		goto out;
	}

	/*
	 * We found an entry in the idr at the right wd, but it's
	 * not the entry we were told to remove.  eparis seriously
	 * fucked up somewhere.
	 */
	if (unlikely(found_ientry != ientry)) {
		WARN_ON(1);
		printk(KERN_WARNING "%s: ientry=%p ientry->wd=%d ientry->group=%p "
			"entry->inode=%p found_ientry=%p found_ientry->wd=%d "
			"found_ientry->group=%p found_ientry->inode=%p\n",
			__func__, ientry, ientry->wd, ientry->fsn_entry.group,
			ientry->fsn_entry.inode, found_ientry, found_ientry->wd,
			found_ientry->fsn_entry.group,
			found_ientry->fsn_entry.inode);
		goto out;
	}

	/*
	 * One ref for being in the idr
	 * one ref held by the caller trying to kill us
	 * one ref grabbed by inotify_idr_find
	 */
	if (unlikely(atomic_read(&ientry->fsn_entry.refcnt) < 3)) {
		printk(KERN_WARNING "%s: ientry=%p ientry->wd=%d ientry->group=%p"
			" ientry->inode=%p\n", __func__, ientry, ientry->wd,
			ientry->fsn_entry.group, ientry->fsn_entry.inode);
		/* we can't really recover with bad ref cnting.. */
		BUG();
	}

	do_inotify_remove_from_idr(group, ientry);
out:
	/* match the ref taken by inotify_idr_find_locked() */
	if (found_ientry)
		fsnotify_put_mark(&found_ientry->fsn_entry);
	ientry->wd = -1;
	spin_unlock(idr_lock);
}

/*
 * Send IN_IGNORED for this wd, remove this wd from the idr.
 */
void inotify_ignored_and_remove_idr(struct fsnotify_mark_entry *entry,
				    struct fsnotify_group *group)
{
	struct inotify_inode_mark_entry *ientry;
	struct fsnotify_event *ignored_event;
	struct inotify_event_private_data *event_priv;
	struct fsnotify_event_private_data *fsn_event_priv;
	int ret;

	ignored_event = fsnotify_create_event(NULL, FS_IN_IGNORED, NULL,
					      FSNOTIFY_EVENT_NONE, NULL, 0,
					      GFP_NOFS);
	if (!ignored_event)
		return;

	ientry = container_of(entry, struct inotify_inode_mark_entry, fsn_entry);

	event_priv = kmem_cache_alloc(event_priv_cachep, GFP_NOFS);
	if (unlikely(!event_priv))
		goto skip_send_ignore;

	fsn_event_priv = &event_priv->fsnotify_event_priv_data;

	fsn_event_priv->group = group;
	event_priv->wd = ientry->wd;

	ret = fsnotify_add_notify_event(group, ignored_event, fsn_event_priv, NULL);
	if (ret)
		inotify_free_event_priv(fsn_event_priv);

skip_send_ignore:

	/* matches the reference taken when the event was created */
	fsnotify_put_event(ignored_event);

	/* remove this entry from the idr */
	inotify_remove_from_idr(group, ientry);

	atomic_dec(&group->inotify_data.user->inotify_watches);
}

/* ding dong the mark is dead */
static void inotify_free_mark(struct fsnotify_mark_entry *entry)
{
	struct inotify_inode_mark_entry *ientry;

	ientry = container_of(entry, struct inotify_inode_mark_entry, fsn_entry);

	kmem_cache_free(inotify_inode_mark_cachep, ientry);
}
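
/*
 * Update the mask of a watch that already exists on this inode.  Returns
 * the existing watch descriptor, or -ENOENT if no mark is present.
 */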

static int inotify_update_existing_watch(struct fsnotify_group *group,
					 struct inode *inode,
					 u32 arg)
{
	struct fsnotify_mark_entry *entry;
	struct inotify_inode_mark_entry *ientry;
	__u32 old_mask, new_mask;
	__u32 mask;
	int add = (arg & IN_MASK_ADD);
	int ret;

	/* don't allow invalid bits: we don't want flags set */
	mask = inotify_arg_to_mask(arg);
	if (unlikely(!mask))
		return -EINVAL;

	spin_lock(&inode->i_lock);
	entry = fsnotify_find_mark_entry(group, inode);
	spin_unlock(&inode->i_lock);
	if (!entry)
		return -ENOENT;

	ientry = container_of(entry, struct inotify_inode_mark_entry, fsn_entry);

	spin_lock(&entry->lock);

	old_mask = entry->mask;
	if (add) {
		entry->mask |= mask;
		new_mask = entry->mask;
	} else {
		entry->mask = mask;
		new_mask = entry->mask;
	}

	spin_unlock(&entry->lock);

	if (old_mask != new_mask) {
		/* more bits in old than in new? */
		int dropped = (old_mask & ~new_mask);
		/* more bits in this entry than the inode's mask? */
		int do_inode = (new_mask & ~inode->i_fsnotify_mask);
		/* more bits in this entry than the group? */
		int do_group = (new_mask & ~group->mask);

		/* update the inode with this new entry */
		if (dropped || do_inode)
			fsnotify_recalc_inode_mask(inode);

		/* update the group mask with the new mask */
		if (dropped || do_group)
			fsnotify_recalc_group_mask(group);
	}

	/* return the wd */
	ret = ientry->wd;

	/* match the get from fsnotify_find_mark_entry() */
	fsnotify_put_mark(entry);

	return ret;
}
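
/*
 * Create a brand new watch: allocate a mark, give it a watch descriptor
 * in the idr and attach it to the inode.  Returns the new wd or an error.
 */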

static int inotify_new_watch(struct fsnotify_group *group,
			     struct inode *inode,
			     u32 arg)
{
	struct inotify_inode_mark_entry *tmp_ientry;
	__u32 mask;
	int ret;
	struct idr *idr = &group->inotify_data.idr;
	spinlock_t *idr_lock = &group->inotify_data.idr_lock;

	/* don't allow invalid bits: we don't want flags set */
	mask = inotify_arg_to_mask(arg);
	if (unlikely(!mask))
		return -EINVAL;

	tmp_ientry = kmem_cache_alloc(inotify_inode_mark_cachep, GFP_KERNEL);
	if (unlikely(!tmp_ientry))
		return -ENOMEM;

	fsnotify_init_mark(&tmp_ientry->fsn_entry, inotify_free_mark);
	tmp_ientry->fsn_entry.mask = mask;
	tmp_ientry->wd = -1;

	ret = -ENOSPC;
	if (atomic_read(&group->inotify_data.user->inotify_watches) >= inotify_max_user_watches)
		goto out_err;

	ret = inotify_add_to_idr(idr, idr_lock, group->inotify_data.last_wd,
				 tmp_ientry);
	if (ret)
		goto out_err;

	/* we are on the idr, now get on the inode */
	ret = fsnotify_add_mark(&tmp_ientry->fsn_entry, group, inode, 0);
	if (ret) {
		/* we failed to get on the inode, get off the idr */
		inotify_remove_from_idr(group, tmp_ientry);
		goto out_err;
	}

	/* update the idr hint, who cares about races, it's just a hint */
	group->inotify_data.last_wd = tmp_ientry->wd;

	/* increment the number of watches the user has */
	atomic_inc(&group->inotify_data.user->inotify_watches);

	/* return the watch descriptor for this new entry */
	ret = tmp_ientry->wd;

	/* if this mark added a new event update the group mask */
	if (mask & ~group->mask)
		fsnotify_recalc_group_mask(group);

out_err:
	/* match the ref from fsnotify_init_mark() */
	fsnotify_put_mark(&tmp_ientry->fsn_entry);

	return ret;
}
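
/*
 * Create a watch on an inode or, if this group already has one, update its
 * mask.  See the retry comment below for how the -EEXIST race is handled.
 */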

static int inotify_update_watch(struct fsnotify_group *group, struct inode *inode, u32 arg)
{
	int ret = 0;

retry:
	/* try to update an existing watch with the new arg */
	ret = inotify_update_existing_watch(group, inode, arg);
	/* no mark present, try to add a new one */
	if (ret == -ENOENT)
		ret = inotify_new_watch(group, inode, arg);
	/*
	 * inotify_new_watch could race with another thread which did an
	 * inotify_new_watch between the update_existing and the add watch
	 * here, go back and try to update an existing mark again.
	 */
	if (ret == -EEXIST)
		goto retry;

	return ret;
}
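
/* allocate and set up the per-instance fsnotify group backing an inotify fd */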

static struct fsnotify_group *inotify_new_group(struct user_struct *user, unsigned int max_events)
{
	struct fsnotify_group *group;

	group = fsnotify_obtain_group(0, &inotify_fsnotify_ops);
	if (IS_ERR(group))
		return group;

	group->max_events = max_events;

	spin_lock_init(&group->inotify_data.idr_lock);
	idr_init(&group->inotify_data.idr);
	group->inotify_data.last_wd = 0;
	group->inotify_data.user = user;
	group->inotify_data.fa = NULL;

	return group;
}

/* inotify syscalls */
SYSCALL_DEFINE1(inotify_init1, int, flags)
{
	struct fsnotify_group *group;
	struct user_struct *user;
	struct file *filp;
	int fd, ret;

	/* Check the IN_* constants for consistency.  */
	BUILD_BUG_ON(IN_CLOEXEC != O_CLOEXEC);
	BUILD_BUG_ON(IN_NONBLOCK != O_NONBLOCK);

	if (flags & ~(IN_CLOEXEC | IN_NONBLOCK))
		return -EINVAL;

	fd = get_unused_fd_flags(flags & O_CLOEXEC);
	if (fd < 0)
		return fd;

	filp = get_empty_filp();
	if (!filp) {
		ret = -ENFILE;
		goto out_put_fd;
	}

	user = get_current_user();
	if (unlikely(atomic_read(&user->inotify_devs) >=
			inotify_max_user_instances)) {
		ret = -EMFILE;
		goto out_free_uid;
	}

	/* fsnotify_obtain_group took a reference to group, we put this when we kill the file in the end */
	group = inotify_new_group(user, inotify_max_queued_events);
	if (IS_ERR(group)) {
		ret = PTR_ERR(group);
		goto out_free_uid;
	}

	filp->f_op = &inotify_fops;
	filp->f_path.mnt = mntget(inotify_mnt);
	filp->f_path.dentry = dget(inotify_mnt->mnt_root);
	filp->f_mapping = filp->f_path.dentry->d_inode->i_mapping;
	filp->f_mode = FMODE_READ;
	filp->f_flags = O_RDONLY | (flags & O_NONBLOCK);
	filp->private_data = group;

	atomic_inc(&user->inotify_devs);

	fd_install(fd, filp);

	return fd;

out_free_uid:
	free_uid(user);
	put_filp(filp);
out_put_fd:
	put_unused_fd(fd);
	return ret;
}

SYSCALL_DEFINE0(inotify_init)
{
	return sys_inotify_init1(0);
}
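
/*
 * inotify_add_watch(2): resolve the path (honouring IN_DONT_FOLLOW and
 * IN_ONLYDIR) and create or update a mark on the resulting inode.
 */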

SYSCALL_DEFINE3(inotify_add_watch, int, fd, const char __user *, pathname,
		u32, mask)
{
	struct fsnotify_group *group;
	struct inode *inode;
	struct path path;
	struct file *filp;
	int ret, fput_needed;
	unsigned flags = 0;

	filp = fget_light(fd, &fput_needed);
	if (unlikely(!filp))
		return -EBADF;

	/* verify that this is indeed an inotify instance */
	if (unlikely(filp->f_op != &inotify_fops)) {
		ret = -EINVAL;
		goto fput_and_out;
	}

	if (!(mask & IN_DONT_FOLLOW))
		flags |= LOOKUP_FOLLOW;
	if (mask & IN_ONLYDIR)
		flags |= LOOKUP_DIRECTORY;

	ret = inotify_find_inode(pathname, &path, flags);
	if (ret)
		goto fput_and_out;

	/* inode held in place by reference to path; group by fget on fd */
	inode = path.dentry->d_inode;
	group = filp->private_data;

	/* create/update an inode mark */
	ret = inotify_update_watch(group, inode, mask);
	if (unlikely(ret))
		goto path_put_and_out;

path_put_and_out:
	path_put(&path);
fput_and_out:
	fput_light(filp, fput_needed);
	return ret;
}
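
/*
 * inotify_rm_watch(2): look up the mark by watch descriptor and destroy it.
 */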

SYSCALL_DEFINE2(inotify_rm_watch, int, fd, __s32, wd)
{
	struct fsnotify_group *group;
	struct inotify_inode_mark_entry *ientry;
	struct file *filp;
	int ret = 0, fput_needed;

	filp = fget_light(fd, &fput_needed);
	if (unlikely(!filp))
		return -EBADF;

	/* verify that this is indeed an inotify instance */
	ret = -EINVAL;
	if (unlikely(filp->f_op != &inotify_fops))
		goto out;

	group = filp->private_data;

	ret = -EINVAL;
	ientry = inotify_idr_find(group, wd);
	if (unlikely(!ientry))
		goto out;

	ret = 0;

	fsnotify_destroy_mark_by_entry(&ientry->fsn_entry);

	/* match ref taken by inotify_idr_find */
	fsnotify_put_mark(&ientry->fsn_entry);

out:
	fput_light(filp, fput_needed);
	return ret;
}

static int
inotify_get_sb(struct file_system_type *fs_type, int flags,
	       const char *dev_name, void *data, struct vfsmount *mnt)
{
	return get_sb_pseudo(fs_type, "inotify", NULL,
			     INOTIFYFS_SUPER_MAGIC, mnt);
}

static struct file_system_type inotify_fs_type = {
	.name		= "inotifyfs",
	.get_sb		= inotify_get_sb,
	.kill_sb	= kill_anon_super,
};

/*
 * inotify_user_setup - Our initialization function.  Note that we cannot return
 * error because we have compiled-in VFS hooks.  So an (unlikely) failure here
 * must result in panic().
 */
static int __init inotify_user_setup(void)
{
	int ret;

	ret = register_filesystem(&inotify_fs_type);
	if (unlikely(ret))
		panic("inotify: register_filesystem returned %d!\n", ret);

	inotify_mnt = kern_mount(&inotify_fs_type);
	if (IS_ERR(inotify_mnt))
		panic("inotify: kern_mount ret %ld!\n", PTR_ERR(inotify_mnt));

	inotify_inode_mark_cachep = KMEM_CACHE(inotify_inode_mark_entry, SLAB_PANIC);
	event_priv_cachep = KMEM_CACHE(inotify_event_private_data, SLAB_PANIC);

	inotify_max_queued_events = 16384;
	inotify_max_user_instances = 128;
	inotify_max_user_watches = 8192;

	return 0;
}
module_init(inotify_user_setup);