/*
 * fs/inotify_user.c - inotify support for userspace
 *
 * Authors:
 *	John McCutchan	<ttb@tentacle.dhs.org>
 *	Robert Love	<rml@novell.com>
 *
 * Copyright (C) 2005 John McCutchan
 * Copyright 2006 Hewlett-Packard Development Company, L.P.
 *
 * Copyright (C) 2009 Eric Paris <Red Hat Inc>
 * inotify was largely rewritten to make use of the fsnotify infrastructure
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2, or (at your option) any
 * later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 */
#include <linux/file.h>
#include <linux/fs.h> /* struct inode */
#include <linux/fsnotify_backend.h>
#include <linux/idr.h>
#include <linux/init.h> /* fs_initcall */
#include <linux/inotify.h>
#include <linux/kernel.h> /* roundup() */
#include <linux/namei.h> /* LOOKUP_FOLLOW */
#include <linux/sched/signal.h>
#include <linux/slab.h> /* struct kmem_cache */
#include <linux/syscalls.h>
#include <linux/types.h>
#include <linux/anon_inodes.h>
#include <linux/uaccess.h>
#include <linux/poll.h>
#include <linux/wait.h>

#include "inotify.h"
#include "../fdinfo.h"

#include <asm/ioctls.h>
/* configurable via /proc/sys/fs/inotify/ */
static int inotify_max_queued_events __read_mostly;

struct kmem_cache *inotify_inode_mark_cachep __read_mostly;
#ifdef CONFIG_SYSCTL

#include <linux/sysctl.h>

static int zero;

struct ctl_table inotify_table[] = {
	{
		.procname	= "max_user_instances",
		.data		= &init_user_ns.ucount_max[UCOUNT_INOTIFY_INSTANCES],
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_minmax,
		.extra1		= &zero,
	},
	{
		.procname	= "max_user_watches",
		.data		= &init_user_ns.ucount_max[UCOUNT_INOTIFY_WATCHES],
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_minmax,
		.extra1		= &zero,
	},
	{
		.procname	= "max_queued_events",
		.data		= &inotify_max_queued_events,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_minmax,
		.extra1		= &zero
	},
	{ }
};
#endif /* CONFIG_SYSCTL */
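
/*
 * The table above backs the tunables under /proc/sys/fs/inotify/:
 * max_user_instances, max_user_watches and max_queued_events.  As an
 * illustration (assuming the usual procfs mount point), an administrator
 * could raise the per-user watch limit with:
 *
 *	echo 65536 > /proc/sys/fs/inotify/max_user_watches
 *
 * proc_dointvec_minmax() together with .extra1 = &zero rejects negative
 * values.
 */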
static inline __u32 inotify_arg_to_mask(u32 arg)
{
	__u32 mask;

	/*
	 * every watch should accept its own ignored, care about children,
	 * and receive events when the inode is unmounted
	 */
	mask = (FS_IN_IGNORED | FS_EVENT_ON_CHILD | FS_UNMOUNT);

	/* mask off the flags used to open the fd */
	mask |= (arg & (IN_ALL_EVENTS | IN_ONESHOT | IN_EXCL_UNLINK));

	return mask;
}
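
/*
 * Worked example (the IN_* and FS_* values are bit-for-bit identical, as
 * the BUILD_BUG_ON()s in inotify_user_setup() assert): a userspace mask of
 * IN_MODIFY | IN_ONESHOT becomes
 * FS_IN_IGNORED | FS_EVENT_ON_CHILD | FS_UNMOUNT | FS_MODIFY | FS_IN_ONESHOT.
 */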
static inline u32 inotify_mask_to_arg(__u32 mask)
{
	return mask & (IN_ALL_EVENTS | IN_ISDIR | IN_UNMOUNT | IN_IGNORED |
		       IN_Q_OVERFLOW);
}
/* inotify userspace file descriptor functions */
static __poll_t inotify_poll(struct file *file, poll_table *wait)
{
	struct fsnotify_group *group = file->private_data;
	__poll_t ret = 0;

	poll_wait(file, &group->notification_waitq, wait);
	spin_lock(&group->notification_lock);
	if (!fsnotify_notify_queue_is_empty(group))
		ret = EPOLLIN | EPOLLRDNORM;
	spin_unlock(&group->notification_lock);

	return ret;
}
static int round_event_name_len(struct fsnotify_event *fsn_event)
{
	struct inotify_event_info *event;

	event = INOTIFY_E(fsn_event);
	if (!event->name_len)
		return 0;
	return roundup(event->name_len + 1, sizeof(struct inotify_event));
}
/*
 * Get the next queued event if one exists and is small enough to fit in
 * "count".  Return an error pointer if the buffer is not large enough to
 * hold it.
 *
 * Called with the group->notification_lock held.
 */
static struct fsnotify_event *get_one_event(struct fsnotify_group *group,
					    size_t count)
{
	size_t event_size = sizeof(struct inotify_event);
	struct fsnotify_event *event;

	if (fsnotify_notify_queue_is_empty(group))
		return NULL;

	event = fsnotify_peek_first_event(group);

	pr_debug("%s: group=%p event=%p\n", __func__, group, event);

	event_size += round_event_name_len(event);
	if (event_size > count)
		return ERR_PTR(-EINVAL);

	/*
	 * We held the notification_lock the whole time, so this is the
	 * same event we peeked above.
	 */
	fsnotify_remove_first_event(group);

	return event;
}
/*
 * Copy an event to user space, returning how much we copied.
 *
 * We already checked that the event size is smaller than the
 * buffer we had in "get_one_event()" above.
 */
static ssize_t copy_event_to_user(struct fsnotify_group *group,
				  struct fsnotify_event *fsn_event,
				  char __user *buf)
{
	struct inotify_event inotify_event;
	struct inotify_event_info *event;
	size_t event_size = sizeof(struct inotify_event);
	size_t name_len;
	size_t pad_name_len;

	pr_debug("%s: group=%p event=%p\n", __func__, group, fsn_event);

	event = INOTIFY_E(fsn_event);
	name_len = event->name_len;
	/*
	 * round up name length so it is a multiple of event_size
	 * plus an extra byte for the terminating '\0'.
	 */
	pad_name_len = round_event_name_len(fsn_event);
	inotify_event.len = pad_name_len;
	inotify_event.mask = inotify_mask_to_arg(fsn_event->mask);
	inotify_event.wd = event->wd;
	inotify_event.cookie = event->sync_cookie;

	/* send the main event */
	if (copy_to_user(buf, &inotify_event, event_size))
		return -EFAULT;

	buf += event_size;

	/*
	 * fsnotify only stores the pathname, so here we have to send the
	 * pathname and then pad that pathname out to a multiple of
	 * sizeof(inotify_event) with zeros.
	 */
	if (pad_name_len) {
		/* copy the path name */
		if (copy_to_user(buf, event->name, name_len))
			return -EFAULT;
		buf += name_len;

		/* fill userspace with 0's */
		if (clear_user(buf, pad_name_len - name_len))
			return -EFAULT;
		event_size += pad_name_len;
	}

	return event_size;
}
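
/*
 * What userspace ends up reading for each event, as produced by the
 * copy_to_user()/clear_user() calls above (a descriptive sketch, not a
 * separate format definition):
 *
 *	struct inotify_event { .wd, .mask, .cookie, .len }
 *	followed by .len bytes of name, NUL terminated and zero padded,
 *	where .len = roundup(name_len + 1, sizeof(struct inotify_event)),
 *	or .len = 0 when the event carries no name.
 */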
static ssize_t inotify_read(struct file *file, char __user *buf,
			    size_t count, loff_t *pos)
{
	struct fsnotify_group *group;
	struct fsnotify_event *kevent;
	char __user *start;
	int ret;
	DEFINE_WAIT_FUNC(wait, woken_wake_function);

	start = buf;
	group = file->private_data;

	add_wait_queue(&group->notification_waitq, &wait);
	while (1) {
		spin_lock(&group->notification_lock);
		kevent = get_one_event(group, count);
		spin_unlock(&group->notification_lock);

		pr_debug("%s: group=%p kevent=%p\n", __func__, group, kevent);

		if (kevent) {
			ret = PTR_ERR(kevent);
			if (IS_ERR(kevent))
				break;
			ret = copy_event_to_user(group, kevent, buf);
			fsnotify_destroy_event(group, kevent);
			if (ret < 0)
				break;
			buf += ret;
			count -= ret;
			continue;
		}

		ret = -EAGAIN;
		if (file->f_flags & O_NONBLOCK)
			break;
		ret = -ERESTARTSYS;
		if (signal_pending(current))
			break;

		if (start != buf)
			break;

		wait_woken(&wait, TASK_INTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT);
	}
	remove_wait_queue(&group->notification_waitq, &wait);

	if (start != buf && ret != -EFAULT)
		ret = buf - start;
	return ret;
}
static int inotify_release(struct inode *ignored, struct file *file)
{
	struct fsnotify_group *group = file->private_data;

	pr_debug("%s: group=%p\n", __func__, group);

	/* free this group, matching the get from inotify_new_group() */
	fsnotify_destroy_group(group);

	return 0;
}
static long inotify_ioctl(struct file *file, unsigned int cmd,
			  unsigned long arg)
{
	struct fsnotify_group *group;
	struct fsnotify_event *fsn_event;
	void __user *p;
	int ret = -ENOTTY;
	size_t send_len = 0;

	group = file->private_data;
	p = (void __user *) arg;

	pr_debug("%s: group=%p cmd=%u\n", __func__, group, cmd);

	switch (cmd) {
	case FIONREAD:
		spin_lock(&group->notification_lock);
		list_for_each_entry(fsn_event, &group->notification_list,
				    list) {
			send_len += sizeof(struct inotify_event);
			send_len += round_event_name_len(fsn_event);
		}
		spin_unlock(&group->notification_lock);
		ret = put_user(send_len, (int __user *) p);
		break;
#ifdef CONFIG_CHECKPOINT_RESTORE
	case INOTIFY_IOC_SETNEXTWD:
		ret = -EINVAL;
		if (arg >= 1 && arg <= INT_MAX) {
			struct inotify_group_private_data *data;

			data = &group->inotify_data;
			spin_lock(&data->idr_lock);
			idr_set_cursor(&data->idr, (unsigned int)arg);
			spin_unlock(&data->idr_lock);
			ret = 0;
		}
		break;
#endif /* CONFIG_CHECKPOINT_RESTORE */
	}

	return ret;
}
static const struct file_operations inotify_fops = {
	.show_fdinfo	= inotify_show_fdinfo,
	.poll		= inotify_poll,
	.read		= inotify_read,
	.fasync		= fsnotify_fasync,
	.release	= inotify_release,
	.unlocked_ioctl	= inotify_ioctl,
	.compat_ioctl	= inotify_ioctl,
	.llseek		= noop_llseek,
};
/*
 * inotify_find_inode - resolve a user-given path to a specific inode
 */
static int inotify_find_inode(const char __user *dirname, struct path *path, unsigned flags)
{
	int error;

	error = user_path_at(AT_FDCWD, dirname, flags, path);
	if (error)
		return error;
	/* you can only watch an inode if you have read permissions on it */
	error = inode_permission(path->dentry->d_inode, MAY_READ);
	if (error)
		path_put(path);
	return error;
}
static int inotify_add_to_idr(struct idr *idr, spinlock_t *idr_lock,
			      struct inotify_inode_mark *i_mark)
{
	int ret;

	idr_preload(GFP_KERNEL);
	spin_lock(idr_lock);

	ret = idr_alloc_cyclic(idr, i_mark, 1, 0, GFP_NOWAIT);
	if (ret >= 0) {
		/* we added the mark to the idr, take a reference */
		i_mark->wd = ret;
		fsnotify_get_mark(&i_mark->fsn_mark);
	}

	spin_unlock(idr_lock);
	idr_preload_end();
	return ret < 0 ? ret : 0;
}
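
/*
 * Note: idr_alloc_cyclic() hands out watch descriptors starting at 1 and
 * continues from the idr cursor rather than always reusing the lowest free
 * wd, so descriptors are not recycled immediately after a watch is removed.
 * The INOTIFY_IOC_SETNEXTWD ioctl above moves that cursor explicitly for
 * checkpoint/restore.
 */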
static struct inotify_inode_mark *inotify_idr_find_locked(struct fsnotify_group *group,
							   int wd)
{
	struct idr *idr = &group->inotify_data.idr;
	spinlock_t *idr_lock = &group->inotify_data.idr_lock;
	struct inotify_inode_mark *i_mark;

	assert_spin_locked(idr_lock);

	i_mark = idr_find(idr, wd);
	if (i_mark) {
		struct fsnotify_mark *fsn_mark = &i_mark->fsn_mark;

		fsnotify_get_mark(fsn_mark);
		/* One ref for being in the idr, one ref we just took */
		BUG_ON(refcount_read(&fsn_mark->refcnt) < 2);
	}

	return i_mark;
}
static struct inotify_inode_mark *inotify_idr_find(struct fsnotify_group *group,
						   int wd)
{
	struct inotify_inode_mark *i_mark;
	spinlock_t *idr_lock = &group->inotify_data.idr_lock;

	spin_lock(idr_lock);
	i_mark = inotify_idr_find_locked(group, wd);
	spin_unlock(idr_lock);

	return i_mark;
}
/*
 * Remove the mark from the idr (if present) and drop the reference
 * on the mark because it was in the idr.
 */
static void inotify_remove_from_idr(struct fsnotify_group *group,
				    struct inotify_inode_mark *i_mark)
{
	struct idr *idr = &group->inotify_data.idr;
	spinlock_t *idr_lock = &group->inotify_data.idr_lock;
	struct inotify_inode_mark *found_i_mark = NULL;
	int wd;

	spin_lock(idr_lock);
	wd = i_mark->wd;

	/*
	 * Does this i_mark think it is in the idr?  We shouldn't get called
	 * if it wasn't.
	 */
	if (wd == -1) {
		WARN_ONCE(1, "%s: i_mark=%p i_mark->wd=%d i_mark->group=%p\n",
			__func__, i_mark, i_mark->wd, i_mark->fsn_mark.group);
		goto out;
	}

	/* Let's look in the idr to see if we find it */
	found_i_mark = inotify_idr_find_locked(group, wd);
	if (unlikely(!found_i_mark)) {
		WARN_ONCE(1, "%s: i_mark=%p i_mark->wd=%d i_mark->group=%p\n",
			__func__, i_mark, i_mark->wd, i_mark->fsn_mark.group);
		goto out;
	}

	/*
	 * We found a mark in the idr at the right wd, but it's
	 * not the mark we were told to remove.  Something went
	 * seriously wrong somewhere.
	 */
	if (unlikely(found_i_mark != i_mark)) {
		WARN_ONCE(1, "%s: i_mark=%p i_mark->wd=%d i_mark->group=%p "
			"found_i_mark=%p found_i_mark->wd=%d "
			"found_i_mark->group=%p\n", __func__, i_mark,
			i_mark->wd, i_mark->fsn_mark.group, found_i_mark,
			found_i_mark->wd, found_i_mark->fsn_mark.group);
		goto out;
	}

	/*
	 * One ref for being in the idr,
	 * one ref grabbed by inotify_idr_find.
	 */
	if (unlikely(refcount_read(&i_mark->fsn_mark.refcnt) < 2)) {
		printk(KERN_ERR "%s: i_mark=%p i_mark->wd=%d i_mark->group=%p\n",
			 __func__, i_mark, i_mark->wd, i_mark->fsn_mark.group);
		/* we can't really recover with bad refcounting... */
		BUG();
	}

	idr_remove(idr, wd);
	/* Removed from the idr, drop that ref. */
	fsnotify_put_mark(&i_mark->fsn_mark);
out:
	i_mark->wd = -1;
	spin_unlock(idr_lock);
	/* match the ref taken by inotify_idr_find_locked() */
	if (found_i_mark)
		fsnotify_put_mark(&found_i_mark->fsn_mark);
}
/*
 * Send IN_IGNORED for this wd, remove this wd from the idr.
 */
void inotify_ignored_and_remove_idr(struct fsnotify_mark *fsn_mark,
				    struct fsnotify_group *group)
{
	struct inotify_inode_mark *i_mark;
	struct fsnotify_iter_info iter_info = { };

	fsnotify_iter_set_report_type_mark(&iter_info, FSNOTIFY_OBJ_TYPE_INODE,
					   fsn_mark);

	/* Queue ignore event for the watch */
	inotify_handle_event(group, NULL, FS_IN_IGNORED, NULL,
			     FSNOTIFY_EVENT_NONE, NULL, 0, &iter_info);

	i_mark = container_of(fsn_mark, struct inotify_inode_mark, fsn_mark);
	/* remove this mark from the idr */
	inotify_remove_from_idr(group, i_mark);

	dec_inotify_watches(group->inotify_data.ucounts);
}
static int inotify_update_existing_watch(struct fsnotify_group *group,
					 struct inode *inode,
					 u32 arg)
{
	struct fsnotify_mark *fsn_mark;
	struct inotify_inode_mark *i_mark;
	__u32 old_mask, new_mask;
	__u32 mask;
	int add = (arg & IN_MASK_ADD);
	int ret;

	mask = inotify_arg_to_mask(arg);

	fsn_mark = fsnotify_find_mark(&inode->i_fsnotify_marks, group);
	if (!fsn_mark)
		return -ENOENT;

	i_mark = container_of(fsn_mark, struct inotify_inode_mark, fsn_mark);

	spin_lock(&fsn_mark->lock);
	old_mask = fsn_mark->mask;
	if (add)
		fsn_mark->mask |= mask;
	else
		fsn_mark->mask = mask;
	new_mask = fsn_mark->mask;
	spin_unlock(&fsn_mark->lock);

	if (old_mask != new_mask) {
		/* more bits in old than in new? */
		int dropped = (old_mask & ~new_mask);
		/* more bits in this fsn_mark than the inode's mask? */
		int do_inode = (new_mask & ~inode->i_fsnotify_mask);

		/* update the inode with this new fsn_mark */
		if (dropped || do_inode)
			fsnotify_recalc_mask(inode->i_fsnotify_marks);
	}

	/* return the wd */
	ret = i_mark->wd;

	/* match the get from fsnotify_find_mark() */
	fsnotify_put_mark(fsn_mark);

	return ret;
}
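
/*
 * In short: with IN_MASK_ADD the requested bits are OR-ed into the existing
 * mark's mask, otherwise the mask is replaced outright; either way the
 * inode-wide mask is only recalculated when bits were actually dropped or
 * newly required.
 */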
static int inotify_new_watch(struct fsnotify_group *group,
			     struct inode *inode,
			     u32 arg)
{
	struct inotify_inode_mark *tmp_i_mark;
	__u32 mask;
	int ret;
	struct idr *idr = &group->inotify_data.idr;
	spinlock_t *idr_lock = &group->inotify_data.idr_lock;

	mask = inotify_arg_to_mask(arg);

	tmp_i_mark = kmem_cache_alloc(inotify_inode_mark_cachep, GFP_KERNEL);
	if (unlikely(!tmp_i_mark))
		return -ENOMEM;

	fsnotify_init_mark(&tmp_i_mark->fsn_mark, group);
	tmp_i_mark->fsn_mark.mask = mask;
	tmp_i_mark->wd = -1;

	ret = inotify_add_to_idr(idr, idr_lock, tmp_i_mark);
	if (ret)
		goto out_err;

	/* increment the number of watches the user has */
	if (!inc_inotify_watches(group->inotify_data.ucounts)) {
		inotify_remove_from_idr(group, tmp_i_mark);
		ret = -ENOSPC;
		goto out_err;
	}

	/* we are on the idr, now get on the inode */
	ret = fsnotify_add_inode_mark_locked(&tmp_i_mark->fsn_mark, inode, 0);
	if (ret) {
		/* we failed to get on the inode, get off the idr */
		inotify_remove_from_idr(group, tmp_i_mark);
		goto out_err;
	}

	/* return the watch descriptor for this new mark */
	ret = tmp_i_mark->wd;

out_err:
	/* match the ref from fsnotify_init_mark() */
	fsnotify_put_mark(&tmp_i_mark->fsn_mark);

	return ret;
}
static int inotify_update_watch(struct fsnotify_group *group, struct inode *inode, u32 arg)
{
	int ret = 0;

	mutex_lock(&group->mark_mutex);
	/* try to update an existing watch with the new arg */
	ret = inotify_update_existing_watch(group, inode, arg);
	/* no mark present, try to add a new one */
	if (ret == -ENOENT)
		ret = inotify_new_watch(group, inode, arg);
	mutex_unlock(&group->mark_mutex);

	return ret;
}
static struct fsnotify_group *inotify_new_group(unsigned int max_events)
{
	struct fsnotify_group *group;
	struct inotify_event_info *oevent;

	group = fsnotify_alloc_group(&inotify_fsnotify_ops);
	if (IS_ERR(group))
		return group;

	oevent = kmalloc(sizeof(struct inotify_event_info), GFP_KERNEL);
	if (unlikely(!oevent)) {
		fsnotify_destroy_group(group);
		return ERR_PTR(-ENOMEM);
	}
	group->overflow_event = &oevent->fse;
	fsnotify_init_event(group->overflow_event, NULL, FS_Q_OVERFLOW);
	oevent->wd = -1;
	oevent->sync_cookie = 0;
	oevent->name_len = 0;

	group->max_events = max_events;

	spin_lock_init(&group->inotify_data.idr_lock);
	idr_init(&group->inotify_data.idr);
	group->inotify_data.ucounts = inc_ucount(current_user_ns(),
						 current_euid(),
						 UCOUNT_INOTIFY_INSTANCES);

	if (!group->inotify_data.ucounts) {
		fsnotify_destroy_group(group);
		return ERR_PTR(-EMFILE);
	}

	return group;
}
/* inotify syscalls */
static int do_inotify_init(int flags)
{
	struct fsnotify_group *group;
	int ret;

	/* Check the IN_* constants for consistency. */
	BUILD_BUG_ON(IN_CLOEXEC != O_CLOEXEC);
	BUILD_BUG_ON(IN_NONBLOCK != O_NONBLOCK);

	if (flags & ~(IN_CLOEXEC | IN_NONBLOCK))
		return -EINVAL;

	/*
	 * inotify_new_group() took a reference to the group; we drop it
	 * when the file is released in inotify_release().
	 */
	group = inotify_new_group(inotify_max_queued_events);
	if (IS_ERR(group))
		return PTR_ERR(group);

	ret = anon_inode_getfd("inotify", &inotify_fops, group,
			       O_RDONLY | flags);
	if (ret < 0)
		fsnotify_destroy_group(group);

	return ret;
}

SYSCALL_DEFINE1(inotify_init1, int, flags)
{
	return do_inotify_init(flags);
}

SYSCALL_DEFINE0(inotify_init)
{
	return do_inotify_init(0);
}
SYSCALL_DEFINE3(inotify_add_watch, int, fd, const char __user *, pathname,
		u32, mask)
{
	struct fsnotify_group *group;
	struct inode *inode;
	struct path path;
	struct fd f;
	int ret;
	unsigned flags = 0;

	/*
	 * We share a lot of code with fs/dnotify.  We also share
	 * the bit layout between inotify's IN_* and the fsnotify
	 * FS_*.  This check ensures that only the inotify IN_*
	 * bits get passed in and set in watches/events.
	 */
	if (unlikely(mask & ~ALL_INOTIFY_BITS))
		return -EINVAL;
	/*
	 * Require at least one valid bit set in the mask.
	 * Without _something_ set, we would have no events to
	 * watch for.
	 */
	if (unlikely(!(mask & ALL_INOTIFY_BITS)))
		return -EINVAL;

	f = fdget(fd);
	if (unlikely(!f.file))
		return -EBADF;

	/* verify that this is indeed an inotify instance */
	if (unlikely(f.file->f_op != &inotify_fops)) {
		ret = -EINVAL;
		goto fput_and_out;
	}

	if (!(mask & IN_DONT_FOLLOW))
		flags |= LOOKUP_FOLLOW;
	if (mask & IN_ONLYDIR)
		flags |= LOOKUP_DIRECTORY;

	ret = inotify_find_inode(pathname, &path, flags);
	if (ret)
		goto fput_and_out;

	/* inode held in place by reference to path; group by fget on fd */
	inode = path.dentry->d_inode;
	group = f.file->private_data;

	/* create/update an inode mark */
	ret = inotify_update_watch(group, inode, mask);
	path_put(&path);
fput_and_out:
	fdput(f);
	return ret;
}
SYSCALL_DEFINE2(inotify_rm_watch, int, fd, __s32, wd)
{
	struct fsnotify_group *group;
	struct inotify_inode_mark *i_mark;
	struct fd f;
	int ret = 0;

	f = fdget(fd);
	if (unlikely(!f.file))
		return -EBADF;

	/* verify that this is indeed an inotify instance */
	ret = -EINVAL;
	if (unlikely(f.file->f_op != &inotify_fops))
		goto out;

	group = f.file->private_data;

	ret = -EINVAL;
	i_mark = inotify_idr_find(group, wd);
	if (unlikely(!i_mark))
		goto out;

	ret = 0;

	fsnotify_destroy_mark(&i_mark->fsn_mark, group);

	/* match ref taken by inotify_idr_find */
	fsnotify_put_mark(&i_mark->fsn_mark);

out:
	fdput(f);
	return ret;
}
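
/*
 * A minimal userspace sketch of the three syscalls above (illustrative
 * only; this is ordinary libc usage, not kernel code, and error handling
 * is abbreviated):
 *
 *	#include <sys/inotify.h>
 *	#include <unistd.h>
 *	#include <stdio.h>
 *
 *	int fd = inotify_init1(IN_CLOEXEC);
 *	int wd = inotify_add_watch(fd, "/tmp", IN_CREATE | IN_DELETE);
 *	char buf[4096];
 *	ssize_t len = read(fd, buf, sizeof(buf));	// blocks until events arrive
 *	for (char *p = buf; p < buf + len;
 *	     p += sizeof(struct inotify_event) + ((struct inotify_event *)p)->len) {
 *		struct inotify_event *ev = (struct inotify_event *)p;
 *		printf("wd=%d mask=%x name=%s\n", ev->wd, ev->mask,
 *		       ev->len ? ev->name : "");
 *	}
 *	inotify_rm_watch(fd, wd);
 *	close(fd);
 */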
/*
 * inotify_user_setup - Our initialization function.  Note that we cannot return
 * error because we have compiled-in VFS hooks.  So an (unlikely) failure here
 * must result in panic().
 */
static int __init inotify_user_setup(void)
{
	BUILD_BUG_ON(IN_ACCESS != FS_ACCESS);
	BUILD_BUG_ON(IN_MODIFY != FS_MODIFY);
	BUILD_BUG_ON(IN_ATTRIB != FS_ATTRIB);
	BUILD_BUG_ON(IN_CLOSE_WRITE != FS_CLOSE_WRITE);
	BUILD_BUG_ON(IN_CLOSE_NOWRITE != FS_CLOSE_NOWRITE);
	BUILD_BUG_ON(IN_OPEN != FS_OPEN);
	BUILD_BUG_ON(IN_MOVED_FROM != FS_MOVED_FROM);
	BUILD_BUG_ON(IN_MOVED_TO != FS_MOVED_TO);
	BUILD_BUG_ON(IN_CREATE != FS_CREATE);
	BUILD_BUG_ON(IN_DELETE != FS_DELETE);
	BUILD_BUG_ON(IN_DELETE_SELF != FS_DELETE_SELF);
	BUILD_BUG_ON(IN_MOVE_SELF != FS_MOVE_SELF);
	BUILD_BUG_ON(IN_UNMOUNT != FS_UNMOUNT);
	BUILD_BUG_ON(IN_Q_OVERFLOW != FS_Q_OVERFLOW);
	BUILD_BUG_ON(IN_IGNORED != FS_IN_IGNORED);
	BUILD_BUG_ON(IN_EXCL_UNLINK != FS_EXCL_UNLINK);
	BUILD_BUG_ON(IN_ISDIR != FS_ISDIR);
	BUILD_BUG_ON(IN_ONESHOT != FS_IN_ONESHOT);

	BUG_ON(hweight32(ALL_INOTIFY_BITS) != 21);

	inotify_inode_mark_cachep = KMEM_CACHE(inotify_inode_mark, SLAB_PANIC);

	inotify_max_queued_events = 16384;
	init_user_ns.ucount_max[UCOUNT_INOTIFY_INSTANCES] = 128;
	init_user_ns.ucount_max[UCOUNT_INOTIFY_WATCHES] = 8192;

	return 0;
}
fs_initcall(inotify_user_setup);