/* fs/notify/fanotify/fanotify_user.c */
#include <linux/fanotify.h>
#include <linux/fcntl.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/anon_inodes.h>
#include <linux/fsnotify_backend.h>
#include <linux/init.h>
#include <linux/mount.h>
#include <linux/namei.h>
#include <linux/poll.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/uaccess.h>
#include <linux/compat.h>

#include <asm/ioctls.h>

#include "../../mount.h"
#include "../fdinfo.h"
#include "fanotify.h"
#define FANOTIFY_DEFAULT_MAX_EVENTS	16384
#define FANOTIFY_DEFAULT_MAX_MARKS	8192
#define FANOTIFY_DEFAULT_MAX_LISTENERS	128

extern const struct fsnotify_ops fanotify_fsnotify_ops;

static struct kmem_cache *fanotify_mark_cache __read_mostly;
static struct kmem_cache *fanotify_response_event_cache __read_mostly;
struct kmem_cache *fanotify_event_cachep __read_mostly;

struct fanotify_response_event {
	struct list_head list;
	__s32 fd;
	struct fanotify_event_info *event;
};
/*
 * Get an fsnotify notification event if one exists and is small
 * enough to fit in "count". Return an error pointer if the count
 * is not large enough.
 *
 * Called with the group->notification_mutex held.
 */
static struct fsnotify_event *get_one_event(struct fsnotify_group *group,
					    size_t count)
{
	BUG_ON(!mutex_is_locked(&group->notification_mutex));

	pr_debug("%s: group=%p count=%zd\n", __func__, group, count);

	if (fsnotify_notify_queue_is_empty(group))
		return NULL;

	if (FAN_EVENT_METADATA_LEN > count)
		return ERR_PTR(-EINVAL);

	/*
	 * We have held the notification_mutex the whole time, so the queue
	 * cannot have changed since the emptiness check above.
	 */
	return fsnotify_remove_notify_event(group);
}
static int create_fd(struct fsnotify_group *group,
		     struct fanotify_event_info *event,
		     struct file **file)
{
	int client_fd;
	struct file *new_file;

	pr_debug("%s: group=%p event=%p\n", __func__, group, event);

	client_fd = get_unused_fd();
	if (client_fd < 0)
		return client_fd;

	/*
	 * We need a new file handle for the userspace program so it can read
	 * even if the file was originally opened O_WRONLY.
	 */
	/*
	 * It's possible this event was an overflow event.  In that case
	 * dentry and mnt are NULL; that's fine, just don't call dentry_open().
	 */
	if (event->path.dentry && event->path.mnt)
		new_file = dentry_open(&event->path,
				       group->fanotify_data.f_flags | FMODE_NONOTIFY,
				       current_cred());
	else
		new_file = ERR_PTR(-EOVERFLOW);
	if (IS_ERR(new_file)) {
		/*
		 * We still send an event even if we can't open the file.
		 * This can happen when, say, tasks are gone and we try to
		 * open their /proc files, or we try to open a WRONLY file
		 * like in sysfs.  We just send the errno to userspace since
		 * there isn't much else we can do.
		 */
		put_unused_fd(client_fd);
		client_fd = PTR_ERR(new_file);
	} else {
		*file = new_file;
	}

	return client_fd;
}
static int fill_event_metadata(struct fsnotify_group *group,
			       struct fanotify_event_metadata *metadata,
			       struct fsnotify_event *fsn_event,
			       struct file **file)
{
	int ret = 0;
	struct fanotify_event_info *event;

	pr_debug("%s: group=%p metadata=%p event=%p\n", __func__,
		 group, metadata, fsn_event);

	*file = NULL;
	event = container_of(fsn_event, struct fanotify_event_info, fse);
	metadata->event_len = FAN_EVENT_METADATA_LEN;
	metadata->metadata_len = FAN_EVENT_METADATA_LEN;
	metadata->vers = FANOTIFY_METADATA_VERSION;
	metadata->reserved = 0;
	metadata->mask = fsn_event->mask & FAN_ALL_OUTGOING_EVENTS;
	metadata->pid = pid_vnr(event->tgid);
	if (unlikely(fsn_event->mask & FAN_Q_OVERFLOW))
		metadata->fd = FAN_NOFD;
	else {
		metadata->fd = create_fd(group, event, file);
		if (metadata->fd < 0)
			ret = metadata->fd;
	}

	return ret;
}
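
/*
 * For reference, a minimal userspace consumer of the metadata filled in
 * above (an illustrative sketch, not part of this file, assuming a group
 * fd 'fan_fd' obtained from fanotify_init()) walks the read buffer with
 * the FAN_EVENT_OK()/FAN_EVENT_NEXT() helpers from <linux/fanotify.h>:
 *
 *	char buf[4096];
 *	ssize_t len = read(fan_fd, buf, sizeof(buf));
 *	struct fanotify_event_metadata *md;
 *
 *	for (md = (void *)buf; FAN_EVENT_OK(md, len);
 *	     md = FAN_EVENT_NEXT(md, len)) {
 *		if (md->vers != FANOTIFY_METADATA_VERSION)
 *			break;			// ABI mismatch
 *		// ... use md->mask, md->pid, md->fd ...
 *		if (md->fd >= 0)
 *			close(md->fd);		// fd came from create_fd()
 *	}
 */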
#ifdef CONFIG_FANOTIFY_ACCESS_PERMISSIONS
static struct fanotify_response_event *dequeue_re(struct fsnotify_group *group,
						  __s32 fd)
{
	struct fanotify_response_event *re, *return_re = NULL;

	mutex_lock(&group->fanotify_data.access_mutex);
	list_for_each_entry(re, &group->fanotify_data.access_list, list) {
		if (re->fd != fd)
			continue;

		list_del_init(&re->list);
		return_re = re;
		break;
	}
	mutex_unlock(&group->fanotify_data.access_mutex);

	pr_debug("%s: found return_re=%p\n", __func__, return_re);

	return return_re;
}

static int process_access_response(struct fsnotify_group *group,
				   struct fanotify_response *response_struct)
{
	struct fanotify_response_event *re;
	__s32 fd = response_struct->fd;
	__u32 response = response_struct->response;

	pr_debug("%s: group=%p fd=%d response=%d\n", __func__, group,
		 fd, response);
	/*
	 * Make sure the response is valid.  If it is invalid we do nothing;
	 * either userspace can send a valid response or we will clean it up
	 * after the timeout.
	 */
	switch (response) {
	case FAN_ALLOW:
	case FAN_DENY:
		break;
	default:
		return -EINVAL;
	}

	if (fd < 0)
		return -EINVAL;

	re = dequeue_re(group, fd);
	if (!re)
		return -ENOENT;

	re->event->response = response;

	wake_up(&group->fanotify_data.access_waitq);

	kmem_cache_free(fanotify_response_event_cache, re);

	return 0;
}
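
/*
 * An illustrative userspace permission reply (a sketch, not part of this
 * file): after reading a FAN_OPEN_PERM/FAN_ACCESS_PERM event, the listener
 * writes a struct fanotify_response naming the event's fd back to the
 * fanotify fd, which is what fanotify_write() below feeds into
 * process_access_response():
 *
 *	struct fanotify_response resp = {
 *		.fd = md->fd,			// fd from the event metadata
 *		.response = FAN_ALLOW,		// or FAN_DENY
 *	};
 *	write(fan_fd, &resp, sizeof(resp));
 */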
static int prepare_for_access_response(struct fsnotify_group *group,
				       struct fsnotify_event *event,
				       __s32 fd)
{
	struct fanotify_response_event *re;

	if (!(event->mask & FAN_ALL_PERM_EVENTS))
		return 0;

	re = kmem_cache_alloc(fanotify_response_event_cache, GFP_KERNEL);
	if (!re)
		return -ENOMEM;

	re->event = FANOTIFY_E(event);
	re->fd = fd;

	mutex_lock(&group->fanotify_data.access_mutex);

	if (atomic_read(&group->fanotify_data.bypass_perm)) {
		mutex_unlock(&group->fanotify_data.access_mutex);
		kmem_cache_free(fanotify_response_event_cache, re);
		FANOTIFY_E(event)->response = FAN_ALLOW;
		return 0;
	}

	list_add_tail(&re->list, &group->fanotify_data.access_list);
	mutex_unlock(&group->fanotify_data.access_mutex);

	return 0;
}

#else
static int prepare_for_access_response(struct fsnotify_group *group,
				       struct fsnotify_event *event,
				       __s32 fd)
{
	return 0;
}

#endif
static ssize_t copy_event_to_user(struct fsnotify_group *group,
				  struct fsnotify_event *event,
				  char __user *buf)
{
	struct fanotify_event_metadata fanotify_event_metadata;
	struct file *f;
	int fd, ret;

	pr_debug("%s: group=%p event=%p\n", __func__, group, event);

	ret = fill_event_metadata(group, &fanotify_event_metadata, event, &f);
	if (ret < 0)
		goto out;

	fd = fanotify_event_metadata.fd;
	ret = -EFAULT;
	if (copy_to_user(buf, &fanotify_event_metadata,
			 fanotify_event_metadata.event_len))
		goto out_close_fd;

	ret = prepare_for_access_response(group, event, fd);
	if (ret)
		goto out_close_fd;

	if (fd != FAN_NOFD)
		fd_install(fd, f);
	return fanotify_event_metadata.event_len;

out_close_fd:
	if (fd != FAN_NOFD) {
		put_unused_fd(fd);
		fput(f);
	}
out:
#ifdef CONFIG_FANOTIFY_ACCESS_PERMISSIONS
	if (event->mask & FAN_ALL_PERM_EVENTS) {
		FANOTIFY_E(event)->response = FAN_DENY;
		wake_up(&group->fanotify_data.access_waitq);
	}
#endif
	return ret;
}
/* fanotify userspace file descriptor functions */
static unsigned int fanotify_poll(struct file *file, poll_table *wait)
{
	struct fsnotify_group *group = file->private_data;
	int ret = 0;

	poll_wait(file, &group->notification_waitq, wait);
	mutex_lock(&group->notification_mutex);
	if (!fsnotify_notify_queue_is_empty(group))
		ret = POLLIN | POLLRDNORM;
	mutex_unlock(&group->notification_mutex);

	return ret;
}
static ssize_t fanotify_read(struct file *file, char __user *buf,
			     size_t count, loff_t *pos)
{
	struct fsnotify_group *group;
	struct fsnotify_event *kevent;
	char __user *start;
	int ret;
	DEFINE_WAIT(wait);

	start = buf;
	group = file->private_data;

	pr_debug("%s: group=%p\n", __func__, group);

	while (1) {
		prepare_to_wait(&group->notification_waitq, &wait, TASK_INTERRUPTIBLE);

		mutex_lock(&group->notification_mutex);
		kevent = get_one_event(group, count);
		mutex_unlock(&group->notification_mutex);

		if (kevent) {
			ret = PTR_ERR(kevent);
			if (IS_ERR(kevent))
				break;
			ret = copy_event_to_user(group, kevent, buf);
			/*
			 * Permission events get destroyed after we
			 * receive the response.
			 */
			if (!(kevent->mask & FAN_ALL_PERM_EVENTS))
				fsnotify_destroy_event(group, kevent);
			if (ret < 0)
				break;
			buf += ret;
			count -= ret;
			continue;
		}

		ret = -EAGAIN;
		if (file->f_flags & O_NONBLOCK)
			break;
		ret = -ERESTARTSYS;
		if (signal_pending(current))
			break;

		if (start != buf)
			break;

		schedule();
	}

	finish_wait(&group->notification_waitq, &wait);
	if (start != buf && ret != -EFAULT)
		ret = buf - start;
	return ret;
}
static ssize_t fanotify_write(struct file *file, const char __user *buf, size_t count, loff_t *pos)
{
#ifdef CONFIG_FANOTIFY_ACCESS_PERMISSIONS
	struct fanotify_response response = { .fd = -1, .response = -1 };
	struct fsnotify_group *group;
	int ret;

	group = file->private_data;

	if (count > sizeof(response))
		count = sizeof(response);

	pr_debug("%s: group=%p count=%zu\n", __func__, group, count);

	if (copy_from_user(&response, buf, count))
		return -EFAULT;

	ret = process_access_response(group, &response);
	if (ret < 0)
		count = ret;

	return count;
#else
	return -EINVAL;
#endif
}
static int fanotify_release(struct inode *ignored, struct file *file)
{
	struct fsnotify_group *group = file->private_data;

#ifdef CONFIG_FANOTIFY_ACCESS_PERMISSIONS
	struct fanotify_response_event *re, *lre;

	mutex_lock(&group->fanotify_data.access_mutex);

	atomic_inc(&group->fanotify_data.bypass_perm);

	list_for_each_entry_safe(re, lre, &group->fanotify_data.access_list, list) {
		pr_debug("%s: found group=%p re=%p event=%p\n", __func__, group,
			 re, re->event);

		list_del_init(&re->list);
		re->event->response = FAN_ALLOW;

		kmem_cache_free(fanotify_response_event_cache, re);
	}
	mutex_unlock(&group->fanotify_data.access_mutex);

	wake_up(&group->fanotify_data.access_waitq);
#endif

	/* matches the fanotify_init->fsnotify_alloc_group */
	fsnotify_destroy_group(group);

	return 0;
}
static long fanotify_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	struct fsnotify_group *group;
	struct fsnotify_event *fsn_event;
	void __user *p;
	int ret = -ENOTTY;
	size_t send_len = 0;

	group = file->private_data;

	p = (void __user *) arg;

	switch (cmd) {
	case FIONREAD:
		mutex_lock(&group->notification_mutex);
		list_for_each_entry(fsn_event, &group->notification_list, list)
			send_len += FAN_EVENT_METADATA_LEN;
		mutex_unlock(&group->notification_mutex);
		ret = put_user(send_len, (int __user *) p);
		break;
	}

	return ret;
}
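
/*
 * Illustrative use of the FIONREAD handler above (a userspace sketch, not
 * part of this file): it reports how many bytes of event metadata are
 * queued, which lets a listener size its buffer before calling read():
 *
 *	int avail = 0;
 *	if (ioctl(fan_fd, FIONREAD, &avail) == 0 && avail > 0) {
 *		// read() up to 'avail' bytes of fanotify_event_metadata
 *	}
 */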
static const struct file_operations fanotify_fops = {
	.show_fdinfo	= fanotify_show_fdinfo,
	.poll		= fanotify_poll,
	.read		= fanotify_read,
	.write		= fanotify_write,
	.fasync		= NULL,
	.release	= fanotify_release,
	.unlocked_ioctl	= fanotify_ioctl,
	.compat_ioctl	= fanotify_ioctl,
	.llseek		= noop_llseek,
};

static void fanotify_free_mark(struct fsnotify_mark *fsn_mark)
{
	kmem_cache_free(fanotify_mark_cache, fsn_mark);
}
static int fanotify_find_path(int dfd, const char __user *filename,
			      struct path *path, unsigned int flags)
{
	int ret;

	pr_debug("%s: dfd=%d filename=%p flags=%x\n", __func__,
		 dfd, filename, flags);

	if (filename == NULL) {
		struct fd f = fdget(dfd);

		ret = -EBADF;
		if (!f.file)
			goto out;

		ret = -ENOTDIR;
		if ((flags & FAN_MARK_ONLYDIR) &&
		    !(S_ISDIR(file_inode(f.file)->i_mode))) {
			fdput(f);
			goto out;
		}

		*path = f.file->f_path;
		path_get(path);
		fdput(f);
	} else {
		unsigned int lookup_flags = 0;

		if (!(flags & FAN_MARK_DONT_FOLLOW))
			lookup_flags |= LOOKUP_FOLLOW;
		if (flags & FAN_MARK_ONLYDIR)
			lookup_flags |= LOOKUP_DIRECTORY;

		ret = user_path_at(dfd, filename, lookup_flags, path);
		if (ret)
			goto out;
	}

	/* you can only watch an inode if you have read permissions on it */
	ret = inode_permission(path->dentry->d_inode, MAY_READ);
	if (ret)
		path_put(path);
out:
	return ret;
}
static __u32 fanotify_mark_remove_from_mask(struct fsnotify_mark *fsn_mark,
					    __u32 mask,
					    unsigned int flags,
					    int *destroy)
{
	__u32 oldmask;

	spin_lock(&fsn_mark->lock);
	if (!(flags & FAN_MARK_IGNORED_MASK)) {
		oldmask = fsn_mark->mask;
		fsnotify_set_mark_mask_locked(fsn_mark, (oldmask & ~mask));
	} else {
		oldmask = fsn_mark->ignored_mask;
		fsnotify_set_mark_ignored_mask_locked(fsn_mark, (oldmask & ~mask));
	}
	spin_unlock(&fsn_mark->lock);

	*destroy = !(oldmask & ~mask);

	return mask & oldmask;
}
static int fanotify_remove_vfsmount_mark(struct fsnotify_group *group,
					 struct vfsmount *mnt, __u32 mask,
					 unsigned int flags)
{
	struct fsnotify_mark *fsn_mark = NULL;
	__u32 removed;
	int destroy_mark;

	mutex_lock(&group->mark_mutex);
	fsn_mark = fsnotify_find_vfsmount_mark(group, mnt);
	if (!fsn_mark) {
		mutex_unlock(&group->mark_mutex);
		return -ENOENT;
	}

	removed = fanotify_mark_remove_from_mask(fsn_mark, mask, flags,
						 &destroy_mark);
	if (destroy_mark)
		fsnotify_destroy_mark_locked(fsn_mark, group);
	mutex_unlock(&group->mark_mutex);

	fsnotify_put_mark(fsn_mark);
	if (removed & real_mount(mnt)->mnt_fsnotify_mask)
		fsnotify_recalc_vfsmount_mask(mnt);

	return 0;
}
static int fanotify_remove_inode_mark(struct fsnotify_group *group,
				      struct inode *inode, __u32 mask,
				      unsigned int flags)
{
	struct fsnotify_mark *fsn_mark = NULL;
	__u32 removed;
	int destroy_mark;

	mutex_lock(&group->mark_mutex);
	fsn_mark = fsnotify_find_inode_mark(group, inode);
	if (!fsn_mark) {
		mutex_unlock(&group->mark_mutex);
		return -ENOENT;
	}

	removed = fanotify_mark_remove_from_mask(fsn_mark, mask, flags,
						 &destroy_mark);
	if (destroy_mark)
		fsnotify_destroy_mark_locked(fsn_mark, group);
	mutex_unlock(&group->mark_mutex);

	/* matches the fsnotify_find_inode_mark() */
	fsnotify_put_mark(fsn_mark);
	if (removed & inode->i_fsnotify_mask)
		fsnotify_recalc_inode_mask(inode);

	return 0;
}
static __u32 fanotify_mark_add_to_mask(struct fsnotify_mark *fsn_mark,
				       __u32 mask,
				       unsigned int flags)
{
	__u32 oldmask = -1;

	spin_lock(&fsn_mark->lock);
	if (!(flags & FAN_MARK_IGNORED_MASK)) {
		oldmask = fsn_mark->mask;
		fsnotify_set_mark_mask_locked(fsn_mark, (oldmask | mask));
	} else {
		__u32 tmask = fsn_mark->ignored_mask | mask;
		fsnotify_set_mark_ignored_mask_locked(fsn_mark, tmask);
		if (flags & FAN_MARK_IGNORED_SURV_MODIFY)
			fsn_mark->flags |= FSNOTIFY_MARK_FLAG_IGNORED_SURV_MODIFY;
	}

	if (!(flags & FAN_MARK_ONDIR)) {
		__u32 tmask = fsn_mark->ignored_mask | FAN_ONDIR;
		fsnotify_set_mark_ignored_mask_locked(fsn_mark, tmask);
	}

	spin_unlock(&fsn_mark->lock);

	return mask & ~oldmask;
}
static struct fsnotify_mark *fanotify_add_new_mark(struct fsnotify_group *group,
						   struct inode *inode,
						   struct vfsmount *mnt)
{
	struct fsnotify_mark *mark;
	int ret;

	if (atomic_read(&group->num_marks) > group->fanotify_data.max_marks)
		return ERR_PTR(-ENOSPC);

	mark = kmem_cache_alloc(fanotify_mark_cache, GFP_KERNEL);
	if (!mark)
		return ERR_PTR(-ENOMEM);

	fsnotify_init_mark(mark, fanotify_free_mark);
	ret = fsnotify_add_mark_locked(mark, group, inode, mnt, 0);
	if (ret) {
		fsnotify_put_mark(mark);
		return ERR_PTR(ret);
	}

	return mark;
}
static int fanotify_add_vfsmount_mark(struct fsnotify_group *group,
				      struct vfsmount *mnt, __u32 mask,
				      unsigned int flags)
{
	struct fsnotify_mark *fsn_mark;
	__u32 added;

	mutex_lock(&group->mark_mutex);
	fsn_mark = fsnotify_find_vfsmount_mark(group, mnt);
	if (!fsn_mark) {
		fsn_mark = fanotify_add_new_mark(group, NULL, mnt);
		if (IS_ERR(fsn_mark)) {
			mutex_unlock(&group->mark_mutex);
			return PTR_ERR(fsn_mark);
		}
	}
	added = fanotify_mark_add_to_mask(fsn_mark, mask, flags);
	mutex_unlock(&group->mark_mutex);

	if (added & ~real_mount(mnt)->mnt_fsnotify_mask)
		fsnotify_recalc_vfsmount_mask(mnt);

	fsnotify_put_mark(fsn_mark);
	return 0;
}
static int fanotify_add_inode_mark(struct fsnotify_group *group,
				   struct inode *inode, __u32 mask,
				   unsigned int flags)
{
	struct fsnotify_mark *fsn_mark;
	__u32 added;

	pr_debug("%s: group=%p inode=%p\n", __func__, group, inode);

	/*
	 * If some other task has this inode open for write we should not add
	 * an ignored mark, unless that ignored mark is supposed to survive
	 * modification anyway.
	 */
	if ((flags & FAN_MARK_IGNORED_MASK) &&
	    !(flags & FAN_MARK_IGNORED_SURV_MODIFY) &&
	    (atomic_read(&inode->i_writecount) > 0))
		return 0;

	mutex_lock(&group->mark_mutex);
	fsn_mark = fsnotify_find_inode_mark(group, inode);
	if (!fsn_mark) {
		fsn_mark = fanotify_add_new_mark(group, inode, NULL);
		if (IS_ERR(fsn_mark)) {
			mutex_unlock(&group->mark_mutex);
			return PTR_ERR(fsn_mark);
		}
	}
	added = fanotify_mark_add_to_mask(fsn_mark, mask, flags);
	mutex_unlock(&group->mark_mutex);

	if (added & ~inode->i_fsnotify_mask)
		fsnotify_recalc_inode_mask(inode);

	fsnotify_put_mark(fsn_mark);
	return 0;
}
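
/*
 * For context, an illustrative userspace request (a sketch, not part of
 * this file) that exercises the ignored-mask path above: marking a path
 * with FAN_MARK_IGNORED_MASK suppresses the named events on it, and
 * FAN_MARK_IGNORED_SURV_MODIFY keeps the ignore mask across modifications:
 *
 *	fanotify_mark(fan_fd,
 *		      FAN_MARK_ADD | FAN_MARK_IGNORED_MASK |
 *		      FAN_MARK_IGNORED_SURV_MODIFY,
 *		      FAN_OPEN | FAN_ACCESS, AT_FDCWD, "/var/log/noisy.log");
 */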
/* fanotify syscalls */
SYSCALL_DEFINE2(fanotify_init, unsigned int, flags, unsigned int, event_f_flags)
{
	struct fsnotify_group *group;
	int f_flags, fd;
	struct user_struct *user;
	struct fanotify_event_info *oevent;

	pr_debug("%s: flags=%d event_f_flags=%d\n",
		 __func__, flags, event_f_flags);

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (flags & ~FAN_ALL_INIT_FLAGS)
		return -EINVAL;

	user = get_current_user();
	if (atomic_read(&user->fanotify_listeners) > FANOTIFY_DEFAULT_MAX_LISTENERS) {
		free_uid(user);
		return -EMFILE;
	}

	f_flags = O_RDWR | FMODE_NONOTIFY;
	if (flags & FAN_CLOEXEC)
		f_flags |= O_CLOEXEC;
	if (flags & FAN_NONBLOCK)
		f_flags |= O_NONBLOCK;

	/* fsnotify_alloc_group takes a ref.  Dropped in fanotify_release */
	group = fsnotify_alloc_group(&fanotify_fsnotify_ops);
	if (IS_ERR(group)) {
		free_uid(user);
		return PTR_ERR(group);
	}

	group->fanotify_data.user = user;
	atomic_inc(&user->fanotify_listeners);

	oevent = kmem_cache_alloc(fanotify_event_cachep, GFP_KERNEL);
	if (unlikely(!oevent)) {
		fd = -ENOMEM;
		goto out_destroy_group;
	}
	group->overflow_event = &oevent->fse;
	fsnotify_init_event(group->overflow_event, NULL, FS_Q_OVERFLOW);
	oevent->tgid = get_pid(task_tgid(current));
	oevent->path.mnt = NULL;
	oevent->path.dentry = NULL;

	group->fanotify_data.f_flags = event_f_flags;
#ifdef CONFIG_FANOTIFY_ACCESS_PERMISSIONS
	oevent->response = 0;
	mutex_init(&group->fanotify_data.access_mutex);
	init_waitqueue_head(&group->fanotify_data.access_waitq);
	INIT_LIST_HEAD(&group->fanotify_data.access_list);
	atomic_set(&group->fanotify_data.bypass_perm, 0);
#endif
	switch (flags & FAN_ALL_CLASS_BITS) {
	case FAN_CLASS_NOTIF:
		group->priority = FS_PRIO_0;
		break;
	case FAN_CLASS_CONTENT:
		group->priority = FS_PRIO_1;
		break;
	case FAN_CLASS_PRE_CONTENT:
		group->priority = FS_PRIO_2;
		break;
	default:
		fd = -EINVAL;
		goto out_destroy_group;
	}

	if (flags & FAN_UNLIMITED_QUEUE) {
		fd = -EPERM;
		if (!capable(CAP_SYS_ADMIN))
			goto out_destroy_group;
		group->max_events = UINT_MAX;
	} else {
		group->max_events = FANOTIFY_DEFAULT_MAX_EVENTS;
	}

	if (flags & FAN_UNLIMITED_MARKS) {
		fd = -EPERM;
		if (!capable(CAP_SYS_ADMIN))
			goto out_destroy_group;
		group->fanotify_data.max_marks = UINT_MAX;
	} else {
		group->fanotify_data.max_marks = FANOTIFY_DEFAULT_MAX_MARKS;
	}

	fd = anon_inode_getfd("[fanotify]", &fanotify_fops, group, f_flags);
	if (fd < 0)
		goto out_destroy_group;

	return fd;

out_destroy_group:
	fsnotify_destroy_group(group);
	return fd;
}
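
/*
 * An illustrative userspace call into the syscall above (a sketch, not
 * part of this file); fanotify_init() requires CAP_SYS_ADMIN and returns
 * the group fd, with event_f_flags controlling how event fds are opened:
 *
 *	int fan_fd = fanotify_init(FAN_CLOEXEC | FAN_CLASS_CONTENT,
 *				   O_RDONLY | O_LARGEFILE);
 *	if (fan_fd < 0)
 *		perror("fanotify_init");
 */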
SYSCALL_DEFINE5(fanotify_mark, int, fanotify_fd, unsigned int, flags,
		__u64, mask, int, dfd,
		const char __user *, pathname)
{
	struct inode *inode = NULL;
	struct vfsmount *mnt = NULL;
	struct fsnotify_group *group;
	struct fd f;
	struct path path;
	int ret;

	pr_debug("%s: fanotify_fd=%d flags=%x dfd=%d pathname=%p mask=%llx\n",
		 __func__, fanotify_fd, flags, dfd, pathname, mask);

	/* we only use the lower 32 bits as of right now. */
	if (mask & ((__u64)0xffffffff << 32))
		return -EINVAL;

	if (flags & ~FAN_ALL_MARK_FLAGS)
		return -EINVAL;
	switch (flags & (FAN_MARK_ADD | FAN_MARK_REMOVE | FAN_MARK_FLUSH)) {
	case FAN_MARK_ADD:		/* fallthrough */
	case FAN_MARK_REMOVE:
		if (!mask)
			return -EINVAL;
	case FAN_MARK_FLUSH:
		break;
	default:
		return -EINVAL;
	}

	if (mask & FAN_ONDIR) {
		flags |= FAN_MARK_ONDIR;
		mask &= ~FAN_ONDIR;
	}

#ifdef CONFIG_FANOTIFY_ACCESS_PERMISSIONS
	if (mask & ~(FAN_ALL_EVENTS | FAN_ALL_PERM_EVENTS | FAN_EVENT_ON_CHILD))
#else
	if (mask & ~(FAN_ALL_EVENTS | FAN_EVENT_ON_CHILD))
#endif
		return -EINVAL;

	f = fdget(fanotify_fd);
	if (unlikely(!f.file))
		return -EBADF;

	/* verify that this is indeed an fanotify instance */
	ret = -EINVAL;
	if (unlikely(f.file->f_op != &fanotify_fops))
		goto fput_and_out;
	group = f.file->private_data;

	/*
	 * group->priority == FS_PRIO_0 == FAN_CLASS_NOTIF.  These are not
	 * allowed to set permission events.
	 */
	ret = -EINVAL;
	if (mask & FAN_ALL_PERM_EVENTS &&
	    group->priority == FS_PRIO_0)
		goto fput_and_out;

	ret = fanotify_find_path(dfd, pathname, &path, flags);
	if (ret)
		goto fput_and_out;

	/* inode held in place by reference to path; group by fget on fd */
	if (!(flags & FAN_MARK_MOUNT))
		inode = path.dentry->d_inode;
	else
		mnt = path.mnt;

	/* create/update an inode or vfsmount mark */
	switch (flags & (FAN_MARK_ADD | FAN_MARK_REMOVE | FAN_MARK_FLUSH)) {
	case FAN_MARK_ADD:
		if (flags & FAN_MARK_MOUNT)
			ret = fanotify_add_vfsmount_mark(group, mnt, mask, flags);
		else
			ret = fanotify_add_inode_mark(group, inode, mask, flags);
		break;
	case FAN_MARK_REMOVE:
		if (flags & FAN_MARK_MOUNT)
			ret = fanotify_remove_vfsmount_mark(group, mnt, mask, flags);
		else
			ret = fanotify_remove_inode_mark(group, inode, mask, flags);
		break;
	case FAN_MARK_FLUSH:
		if (flags & FAN_MARK_MOUNT)
			fsnotify_clear_vfsmount_marks_by_group(group);
		else
			fsnotify_clear_inode_marks_by_group(group);
		break;
	default:
		ret = -EINVAL;
	}

	path_put(&path);
fput_and_out:
	fdput(f);
	return ret;
}
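
/*
 * An illustrative userspace mark request against the syscall above (a
 * sketch, not part of this file), watching open and close events on a
 * directory and its immediate children:
 *
 *	int ret = fanotify_mark(fan_fd, FAN_MARK_ADD,
 *				FAN_OPEN | FAN_CLOSE | FAN_EVENT_ON_CHILD,
 *				AT_FDCWD, "/home");
 *	if (ret < 0)
 *		perror("fanotify_mark");
 */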
#ifdef CONFIG_COMPAT
COMPAT_SYSCALL_DEFINE6(fanotify_mark,
				int, fanotify_fd, unsigned int, flags,
				__u32, mask0, __u32, mask1, int, dfd,
				const char  __user *, pathname)
{
	return sys_fanotify_mark(fanotify_fd, flags,
#ifdef __BIG_ENDIAN
				((__u64)mask0 << 32) | mask1,
#else
				((__u64)mask1 << 32) | mask0,
#endif
				 dfd, pathname);
}
#endif
/*
 * fanotify_user_setup - Our initialization function.  Note that we cannot return
 * error because we have compiled-in VFS hooks.  So an (unlikely) failure here
 * must result in panic().
 */
static int __init fanotify_user_setup(void)
{
	fanotify_mark_cache = KMEM_CACHE(fsnotify_mark, SLAB_PANIC);
	fanotify_response_event_cache = KMEM_CACHE(fanotify_response_event,
						   SLAB_PANIC);
	fanotify_event_cachep = KMEM_CACHE(fanotify_event_info, SLAB_PANIC);

	return 0;
}
device_initcall(fanotify_user_setup);