// SPDX-License-Identifier: GPL-2.0
#include <linux/fanotify.h>
#include <linux/fcntl.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/anon_inodes.h>
#include <linux/fsnotify_backend.h>
#include <linux/init.h>
#include <linux/mount.h>
#include <linux/namei.h>
#include <linux/poll.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/uaccess.h>
#include <linux/compat.h>
#include <linux/sched/signal.h>

#include <asm/ioctls.h>

#include "../../mount.h"
#include "../fdinfo.h"
#include "fanotify.h"

#define FANOTIFY_DEFAULT_MAX_EVENTS	16384
#define FANOTIFY_DEFAULT_MAX_MARKS	8192
#define FANOTIFY_DEFAULT_MAX_LISTENERS	128

/*
 * All flags that may be specified in parameter event_f_flags of fanotify_init.
 *
 * Internal and external open flags are stored together in field f_flags of
 * struct file. Only external open flags shall be allowed in event_f_flags.
 * Internal flags like FMODE_NONOTIFY, FMODE_EXEC, FMODE_NOCMTIME shall be
 * excluded.
 */
#define FANOTIFY_INIT_ALL_EVENT_F_BITS ( \
        O_ACCMODE | O_APPEND | O_NONBLOCK | \
        __O_SYNC | O_DSYNC | O_CLOEXEC | \
        O_LARGEFILE | O_NOATIME )

extern const struct fsnotify_ops fanotify_fsnotify_ops;

struct kmem_cache *fanotify_mark_cache __read_mostly;
struct kmem_cache *fanotify_event_cachep __read_mostly;
struct kmem_cache *fanotify_perm_event_cachep __read_mostly;

/*
 * Get an fsnotify notification event if one exists and is small
 * enough to fit in "count". Return an error pointer if the count
 * is not large enough.
 *
 * Called with the group->notification_lock held.
 */
static struct fsnotify_event *get_one_event(struct fsnotify_group *group,
                                            size_t count)
{
        assert_spin_locked(&group->notification_lock);

        pr_debug("%s: group=%p count=%zd\n", __func__, group, count);

        if (fsnotify_notify_queue_is_empty(group))
                return NULL;

        if (FAN_EVENT_METADATA_LEN > count)
                return ERR_PTR(-EINVAL);

        /* held the notification_lock the whole time, so this is the
         * same event we peeked above */
        return fsnotify_remove_first_event(group);
}

static int create_fd(struct fsnotify_group *group,
                     struct fanotify_event_info *event,
                     struct file **file)
{
        int client_fd;
        struct file *new_file;

        pr_debug("%s: group=%p event=%p\n", __func__, group, event);

        client_fd = get_unused_fd_flags(group->fanotify_data.f_flags);
        if (client_fd < 0)
                return client_fd;

        /*
         * We need a new file handle for the userspace program so it can read
         * even if the file was originally opened O_WRONLY.
         */
        /* It's possible this event was an overflow event. In that case dentry
         * and mnt are NULL; that's fine, just don't call dentry_open(). */
        if (event->path.dentry && event->path.mnt)
                new_file = dentry_open(&event->path,
                                       group->fanotify_data.f_flags | FMODE_NONOTIFY,
                                       current_cred());
        else
                new_file = ERR_PTR(-EOVERFLOW);
        if (IS_ERR(new_file)) {
                /*
                 * We still send an event even if we can't open the file. This
                 * can happen when, say, a task is gone and we try to open its
                 * /proc files, or when we try to open a WRONLY file like in
                 * sysfs. We just send the errno to userspace since there isn't
                 * much else we can do.
                 */
                put_unused_fd(client_fd);
                client_fd = PTR_ERR(new_file);
        } else {
                *file = new_file;
        }

        return client_fd;
}

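/*
 * Fill in the fanotify_event_metadata record that will be copied to the
 * listener: version, length, filtered event mask and pid, plus a freshly
 * opened fd from create_fd() (or FAN_NOFD for queue-overflow events).
 */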
static int fill_event_metadata(struct fsnotify_group *group,
                               struct fanotify_event_metadata *metadata,
                               struct fsnotify_event *fsn_event,
                               struct file **file)
{
        int ret = 0;
        struct fanotify_event_info *event;

        pr_debug("%s: group=%p metadata=%p event=%p\n", __func__,
                 group, metadata, fsn_event);

        *file = NULL;
        event = container_of(fsn_event, struct fanotify_event_info, fse);
        metadata->event_len = FAN_EVENT_METADATA_LEN;
        metadata->metadata_len = FAN_EVENT_METADATA_LEN;
        metadata->vers = FANOTIFY_METADATA_VERSION;
        metadata->reserved = 0;
        metadata->mask = fsn_event->mask & FAN_ALL_OUTGOING_EVENTS;
        metadata->pid = pid_vnr(event->tgid);
        if (unlikely(fsn_event->mask & FAN_Q_OVERFLOW))
                metadata->fd = FAN_NOFD;
        else {
                metadata->fd = create_fd(group, event, file);
                if (metadata->fd < 0)
                        ret = metadata->fd;
        }

        return ret;
}

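/*
 * Find the permission event matching the fd userspace replied for, unlink it
 * from the group's access_list and return it; NULL if nothing matches.
 */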
static struct fanotify_perm_event_info *dequeue_event(
                                struct fsnotify_group *group, int fd)
{
        struct fanotify_perm_event_info *event, *return_e = NULL;

        spin_lock(&group->notification_lock);
        list_for_each_entry(event, &group->fanotify_data.access_list,
                            fae.fse.list) {
                if (event->fd != fd)
                        continue;

                list_del_init(&event->fae.fse.list);
                return_e = event;
                break;
        }
        spin_unlock(&group->notification_lock);

        pr_debug("%s: found return_e=%p\n", __func__, return_e);

        return return_e;
}

static int process_access_response(struct fsnotify_group *group,
                                   struct fanotify_response *response_struct)
{
        struct fanotify_perm_event_info *event;
        int fd = response_struct->fd;
        int response = response_struct->response;

        pr_debug("%s: group=%p fd=%d response=%d\n", __func__, group,
                 fd, response);
        /*
         * Make sure the response is valid. If it is invalid we do nothing and
         * either userspace can send a valid response or we will clean it up
         * after the timeout.
         */
        switch (response & ~FAN_AUDIT) {
        case FAN_ALLOW:
        case FAN_DENY:
                break;
        default:
                return -EINVAL;
        }

        if (fd < 0)
                return -EINVAL;

        if ((response & FAN_AUDIT) && !group->fanotify_data.audit)
                return -EINVAL;

        event = dequeue_event(group, fd);
        if (!event)
                return -ENOENT;

        event->response = response;
        wake_up(&group->fanotify_data.access_waitq);

        return 0;
}

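/*
 * Copy one event to the userspace buffer: fill in the metadata record, copy
 * it out, and only then install the newly opened fd into the caller's fd
 * table so a failed copy never leaks a descriptor.
 */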
static ssize_t copy_event_to_user(struct fsnotify_group *group,
                                  struct fsnotify_event *event,
                                  char __user *buf)
{
        struct fanotify_event_metadata fanotify_event_metadata;
        struct file *f;
        int fd, ret;

        pr_debug("%s: group=%p event=%p\n", __func__, group, event);

        ret = fill_event_metadata(group, &fanotify_event_metadata, event, &f);
        if (ret < 0)
                return ret;

        fd = fanotify_event_metadata.fd;
        ret = -EFAULT;
        if (copy_to_user(buf, &fanotify_event_metadata,
                         fanotify_event_metadata.event_len))
                goto out_close_fd;

        if (fanotify_is_perm_event(event->mask))
                FANOTIFY_PE(event)->fd = fd;

        if (fd != FAN_NOFD)
                fd_install(fd, f);
        return fanotify_event_metadata.event_len;

out_close_fd:
        if (fd != FAN_NOFD) {
                put_unused_fd(fd);
                fput(f);
        }
        return ret;
}

/* fanotify userspace file descriptor functions */
static __poll_t fanotify_poll(struct file *file, poll_table *wait)
{
        struct fsnotify_group *group = file->private_data;
        __poll_t ret = 0;

        poll_wait(file, &group->notification_waitq, wait);
        spin_lock(&group->notification_lock);
        if (!fsnotify_notify_queue_is_empty(group))
                ret = EPOLLIN | EPOLLRDNORM;
        spin_unlock(&group->notification_lock);

        return ret;
}

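/*
 * Read as many whole events as fit into the userspace buffer. Unless
 * O_NONBLOCK is set, block when the queue is empty and nothing has been
 * copied yet. Permission events are parked on access_list until userspace
 * answers them via write().
 */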
static ssize_t fanotify_read(struct file *file, char __user *buf,
                             size_t count, loff_t *pos)
{
        struct fsnotify_group *group;
        struct fsnotify_event *kevent;
        char __user *start;
        int ret;
        DEFINE_WAIT_FUNC(wait, woken_wake_function);

        start = buf;
        group = file->private_data;

        pr_debug("%s: group=%p\n", __func__, group);

        add_wait_queue(&group->notification_waitq, &wait);
        while (1) {
                spin_lock(&group->notification_lock);
                kevent = get_one_event(group, count);
                spin_unlock(&group->notification_lock);

                if (IS_ERR(kevent)) {
                        ret = PTR_ERR(kevent);
                        break;
                }

                if (!kevent) {
                        ret = -EAGAIN;
                        if (file->f_flags & O_NONBLOCK)
                                break;

                        ret = -ERESTARTSYS;
                        if (signal_pending(current))
                                break;

                        if (start != buf)
                                break;

                        wait_woken(&wait, TASK_INTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT);
                        continue;
                }

                ret = copy_event_to_user(group, kevent, buf);
                if (unlikely(ret == -EOPENSTALE)) {
                        /*
                         * We cannot report events with stale fd so drop it.
                         * Setting ret to 0 will continue the event loop and
                         * do the right thing if there are no more events to
                         * read (i.e. return bytes read, -EAGAIN or wait).
                         */
                        ret = 0;
                }

                /*
                 * Permission events get queued to wait for response. Other
                 * events can be destroyed now.
                 */
                if (!fanotify_is_perm_event(kevent->mask)) {
                        fsnotify_destroy_event(group, kevent);
                } else {
                        if (ret <= 0) {
                                FANOTIFY_PE(kevent)->response = FAN_DENY;
                                wake_up(&group->fanotify_data.access_waitq);
                        } else {
                                spin_lock(&group->notification_lock);
                                list_add_tail(&kevent->list,
                                              &group->fanotify_data.access_list);
                                spin_unlock(&group->notification_lock);
                        }
                }
                if (ret < 0)
                        break;
                buf += ret;
                count -= ret;
        }
        remove_wait_queue(&group->notification_waitq, &wait);

        if (start != buf && ret != -EFAULT)
                ret = buf - start;
        return ret;
}

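/*
 * Userspace answers a permission event by writing a struct fanotify_response
 * (an fd plus FAN_ALLOW or FAN_DENY, optionally ORed with FAN_AUDIT) to the
 * fanotify fd.
 */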
static ssize_t fanotify_write(struct file *file, const char __user *buf, size_t count, loff_t *pos)
{
        struct fanotify_response response = { .fd = -1, .response = -1 };
        struct fsnotify_group *group;
        int ret;

        if (!IS_ENABLED(CONFIG_FANOTIFY_ACCESS_PERMISSIONS))
                return -EINVAL;

        group = file->private_data;

        if (count > sizeof(response))
                count = sizeof(response);

        pr_debug("%s: group=%p count=%zu\n", __func__, group, count);

        if (copy_from_user(&response, buf, count))
                return -EFAULT;

        ret = process_access_response(group, &response);
        if (ret < 0)
                count = ret;

        return count;
}

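/*
 * Final fput() of the fanotify fd: stop queueing, answer every outstanding
 * permission event with FAN_ALLOW so blocked tasks can make progress, destroy
 * remaining notification events and drop the group reference taken in
 * fanotify_init().
 */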
static int fanotify_release(struct inode *ignored, struct file *file)
{
        struct fsnotify_group *group = file->private_data;
        struct fanotify_perm_event_info *event, *next;
        struct fsnotify_event *fsn_event;

        /*
         * Stop new events from arriving in the notification queue. Since
         * userspace cannot use the fanotify fd anymore, no event can enter or
         * leave access_list by now either.
         */
        fsnotify_group_stop_queueing(group);

        /*
         * Process all permission events on access_list and notification queue
         * and simulate reply from userspace.
         */
        spin_lock(&group->notification_lock);
        list_for_each_entry_safe(event, next, &group->fanotify_data.access_list,
                                 fae.fse.list) {
                pr_debug("%s: found group=%p event=%p\n", __func__, group,
                         event);

                list_del_init(&event->fae.fse.list);
                event->response = FAN_ALLOW;
        }

        /*
         * Destroy all non-permission events. For permission events just
         * dequeue them and set the response. They will be freed once the
         * response is consumed and fanotify_get_response() returns.
         */
        while (!fsnotify_notify_queue_is_empty(group)) {
                fsn_event = fsnotify_remove_first_event(group);
                if (!(fsn_event->mask & FAN_ALL_PERM_EVENTS)) {
                        spin_unlock(&group->notification_lock);
                        fsnotify_destroy_event(group, fsn_event);
                        spin_lock(&group->notification_lock);
                } else {
                        FANOTIFY_PE(fsn_event)->response = FAN_ALLOW;
                }
        }
        spin_unlock(&group->notification_lock);

        /* Response for all permission events is set, wake up waiters */
        wake_up(&group->fanotify_data.access_waitq);

        /* matches the fanotify_init->fsnotify_alloc_group */
        fsnotify_destroy_group(group);

        return 0;
}

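/*
 * Only FIONREAD is supported: report how many bytes of event metadata are
 * currently queued for reading.
 */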
static long fanotify_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
        struct fsnotify_group *group;
        struct fsnotify_event *fsn_event;
        void __user *p;
        int ret = -ENOTTY;
        size_t send_len = 0;

        group = file->private_data;

        p = (void __user *) arg;

        switch (cmd) {
        case FIONREAD:
                spin_lock(&group->notification_lock);
                list_for_each_entry(fsn_event, &group->notification_list, list)
                        send_len += FAN_EVENT_METADATA_LEN;
                spin_unlock(&group->notification_lock);
                ret = put_user(send_len, (int __user *) p);
                break;
        }

        return ret;
}

static const struct file_operations fanotify_fops = {
        .show_fdinfo    = fanotify_show_fdinfo,
        .poll           = fanotify_poll,
        .read           = fanotify_read,
        .write          = fanotify_write,
        .fasync         = NULL,
        .release        = fanotify_release,
        .unlocked_ioctl = fanotify_ioctl,
        .compat_ioctl   = fanotify_ioctl,
        .llseek         = noop_llseek,
};

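/*
 * Resolve the dfd/pathname pair passed to fanotify_mark() into a struct path.
 * A NULL pathname means the mark target is the object dfd itself refers to;
 * FAN_MARK_DONT_FOLLOW and FAN_MARK_ONLYDIR adjust the lookup. The caller
 * must have read permission on the inode it wants to watch.
 */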
static int fanotify_find_path(int dfd, const char __user *filename,
                              struct path *path, unsigned int flags)
{
        int ret;

        pr_debug("%s: dfd=%d filename=%p flags=%x\n", __func__,
                 dfd, filename, flags);

        if (filename == NULL) {
                struct fd f = fdget(dfd);

                ret = -EBADF;
                if (!f.file)
                        goto out;

                ret = -ENOTDIR;
                if ((flags & FAN_MARK_ONLYDIR) &&
                    !(S_ISDIR(file_inode(f.file)->i_mode))) {
                        fdput(f);
                        goto out;
                }

                *path = f.file->f_path;
                path_get(path);
                fdput(f);
        } else {
                unsigned int lookup_flags = 0;

                if (!(flags & FAN_MARK_DONT_FOLLOW))
                        lookup_flags |= LOOKUP_FOLLOW;
                if (flags & FAN_MARK_ONLYDIR)
                        lookup_flags |= LOOKUP_DIRECTORY;

                ret = user_path_at(dfd, filename, lookup_flags, path);
                if (ret)
                        goto out;
        }

        /* you can only watch an inode if you have read permissions on it */
        ret = inode_permission(path->dentry->d_inode, MAY_READ);
        if (ret)
                path_put(path);
out:
        return ret;
}

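/*
 * Clear the given bits in the mark's event mask (or in its ignored mask when
 * FAN_MARK_IGNORED_MASK is set). Returns the bits that were actually removed
 * and sets *destroy once both masks are empty so the caller can free the mark.
 */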
static __u32 fanotify_mark_remove_from_mask(struct fsnotify_mark *fsn_mark,
                                            __u32 mask,
                                            unsigned int flags,
                                            int *destroy)
{
        __u32 oldmask = 0;

        spin_lock(&fsn_mark->lock);
        if (!(flags & FAN_MARK_IGNORED_MASK)) {
                __u32 tmask = fsn_mark->mask & ~mask;

                if (flags & FAN_MARK_ONDIR)
                        tmask &= ~FAN_ONDIR;

                oldmask = fsn_mark->mask;
                fsn_mark->mask = tmask;
        } else {
                __u32 tmask = fsn_mark->ignored_mask & ~mask;
                if (flags & FAN_MARK_ONDIR)
                        tmask &= ~FAN_ONDIR;
                fsn_mark->ignored_mask = tmask;
        }
        *destroy = !(fsn_mark->mask | fsn_mark->ignored_mask);
        spin_unlock(&fsn_mark->lock);

        return mask & oldmask;
}

static int fanotify_remove_vfsmount_mark(struct fsnotify_group *group,
                                         struct vfsmount *mnt, __u32 mask,
                                         unsigned int flags)
{
        struct fsnotify_mark *fsn_mark = NULL;
        __u32 removed;
        int destroy_mark;

        mutex_lock(&group->mark_mutex);
        fsn_mark = fsnotify_find_mark(&real_mount(mnt)->mnt_fsnotify_marks,
                                      group);
        if (!fsn_mark) {
                mutex_unlock(&group->mark_mutex);
                return -ENOENT;
        }

        removed = fanotify_mark_remove_from_mask(fsn_mark, mask, flags,
                                                 &destroy_mark);
        if (removed & real_mount(mnt)->mnt_fsnotify_mask)
                fsnotify_recalc_mask(real_mount(mnt)->mnt_fsnotify_marks);
        if (destroy_mark)
                fsnotify_detach_mark(fsn_mark);
        mutex_unlock(&group->mark_mutex);
        if (destroy_mark)
                fsnotify_free_mark(fsn_mark);

        fsnotify_put_mark(fsn_mark);
        return 0;
}

static int fanotify_remove_inode_mark(struct fsnotify_group *group,
                                      struct inode *inode, __u32 mask,
                                      unsigned int flags)
{
        struct fsnotify_mark *fsn_mark = NULL;
        __u32 removed;
        int destroy_mark;

        mutex_lock(&group->mark_mutex);
        fsn_mark = fsnotify_find_mark(&inode->i_fsnotify_marks, group);
        if (!fsn_mark) {
                mutex_unlock(&group->mark_mutex);
                return -ENOENT;
        }

        removed = fanotify_mark_remove_from_mask(fsn_mark, mask, flags,
                                                 &destroy_mark);
        if (removed & inode->i_fsnotify_mask)
                fsnotify_recalc_mask(inode->i_fsnotify_marks);
        if (destroy_mark)
                fsnotify_detach_mark(fsn_mark);
        mutex_unlock(&group->mark_mutex);
        if (destroy_mark)
                fsnotify_free_mark(fsn_mark);

        /* matches the fsnotify_find_mark() */
        fsnotify_put_mark(fsn_mark);

        return 0;
}

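/*
 * Set the given bits in the mark's event mask (or in its ignored mask when
 * FAN_MARK_IGNORED_MASK is set) and return the bits that were newly added.
 */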
static __u32 fanotify_mark_add_to_mask(struct fsnotify_mark *fsn_mark,
                                       __u32 mask,
                                       unsigned int flags)
{
        __u32 oldmask = -1;

        spin_lock(&fsn_mark->lock);
        if (!(flags & FAN_MARK_IGNORED_MASK)) {
                __u32 tmask = fsn_mark->mask | mask;

                if (flags & FAN_MARK_ONDIR)
                        tmask |= FAN_ONDIR;

                oldmask = fsn_mark->mask;
                fsn_mark->mask = tmask;
        } else {
                __u32 tmask = fsn_mark->ignored_mask | mask;
                if (flags & FAN_MARK_ONDIR)
                        tmask |= FAN_ONDIR;

                fsn_mark->ignored_mask = tmask;
                if (flags & FAN_MARK_IGNORED_SURV_MODIFY)
                        fsn_mark->flags |= FSNOTIFY_MARK_FLAG_IGNORED_SURV_MODIFY;
        }
        spin_unlock(&fsn_mark->lock);

        return mask & ~oldmask;
}

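/*
 * Allocate a new mark and attach it to an inode or vfsmount, enforcing the
 * per-group limit on the number of marks. Called with group->mark_mutex held.
 */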
static struct fsnotify_mark *fanotify_add_new_mark(struct fsnotify_group *group,
                                                   struct inode *inode,
                                                   struct vfsmount *mnt)
{
        struct fsnotify_mark *mark;
        int ret;

        if (atomic_read(&group->num_marks) > group->fanotify_data.max_marks)
                return ERR_PTR(-ENOSPC);

        mark = kmem_cache_alloc(fanotify_mark_cache, GFP_KERNEL);
        if (!mark)
                return ERR_PTR(-ENOMEM);

        fsnotify_init_mark(mark, group);
        ret = fsnotify_add_mark_locked(mark, inode, mnt, 0);
        if (ret) {
                fsnotify_put_mark(mark);
                return ERR_PTR(ret);
        }

        return mark;
}

static int fanotify_add_vfsmount_mark(struct fsnotify_group *group,
                                      struct vfsmount *mnt, __u32 mask,
                                      unsigned int flags)
{
        struct fsnotify_mark *fsn_mark;
        __u32 added;

        mutex_lock(&group->mark_mutex);
        fsn_mark = fsnotify_find_mark(&real_mount(mnt)->mnt_fsnotify_marks,
                                      group);
        if (!fsn_mark) {
                fsn_mark = fanotify_add_new_mark(group, NULL, mnt);
                if (IS_ERR(fsn_mark)) {
                        mutex_unlock(&group->mark_mutex);
                        return PTR_ERR(fsn_mark);
                }
        }
        added = fanotify_mark_add_to_mask(fsn_mark, mask, flags);
        if (added & ~real_mount(mnt)->mnt_fsnotify_mask)
                fsnotify_recalc_mask(real_mount(mnt)->mnt_fsnotify_marks);
        mutex_unlock(&group->mark_mutex);

        fsnotify_put_mark(fsn_mark);
        return 0;
}

static int fanotify_add_inode_mark(struct fsnotify_group *group,
                                   struct inode *inode, __u32 mask,
                                   unsigned int flags)
{
        struct fsnotify_mark *fsn_mark;
        __u32 added;

        pr_debug("%s: group=%p inode=%p\n", __func__, group, inode);

        /*
         * If some other task has this inode open for write we should not add
         * an ignored mark, unless that ignored mark is supposed to survive
         * modification changes anyway.
         */
        if ((flags & FAN_MARK_IGNORED_MASK) &&
            !(flags & FAN_MARK_IGNORED_SURV_MODIFY) &&
            (atomic_read(&inode->i_writecount) > 0))
                return 0;

        mutex_lock(&group->mark_mutex);
        fsn_mark = fsnotify_find_mark(&inode->i_fsnotify_marks, group);
        if (!fsn_mark) {
                fsn_mark = fanotify_add_new_mark(group, inode, NULL);
                if (IS_ERR(fsn_mark)) {
                        mutex_unlock(&group->mark_mutex);
                        return PTR_ERR(fsn_mark);
                }
        }
        added = fanotify_mark_add_to_mask(fsn_mark, mask, flags);
        if (added & ~inode->i_fsnotify_mask)
                fsnotify_recalc_mask(inode->i_fsnotify_marks);
        mutex_unlock(&group->mark_mutex);

        fsnotify_put_mark(fsn_mark);
        return 0;
}

/* fanotify syscalls */
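/*
 * Illustrative userspace usage (not part of the kernel build; flag values come
 * from <sys/fanotify.h> and this sketch assumes a hypothetical buf[] sized for
 * at least one event):
 *
 *	int fd = fanotify_init(FAN_CLOEXEC | FAN_CLASS_NOTIF, O_RDONLY);
 *	fanotify_mark(fd, FAN_MARK_ADD | FAN_MARK_MOUNT,
 *		      FAN_OPEN | FAN_CLOSE, AT_FDCWD, "/");
 *	read(fd, buf, sizeof(buf));	// struct fanotify_event_metadata records,
 *					// each carrying an fd for the object involved
 */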
SYSCALL_DEFINE2(fanotify_init, unsigned int, flags, unsigned int, event_f_flags)
{
        struct fsnotify_group *group;
        int f_flags, fd;
        struct user_struct *user;
        struct fanotify_event_info *oevent;

        pr_debug("%s: flags=%d event_f_flags=%d\n",
                 __func__, flags, event_f_flags);

        if (!capable(CAP_SYS_ADMIN))
                return -EPERM;

#ifdef CONFIG_AUDITSYSCALL
        if (flags & ~(FAN_ALL_INIT_FLAGS | FAN_ENABLE_AUDIT))
#else
        if (flags & ~FAN_ALL_INIT_FLAGS)
#endif
                return -EINVAL;

        if (event_f_flags & ~FANOTIFY_INIT_ALL_EVENT_F_BITS)
                return -EINVAL;

        switch (event_f_flags & O_ACCMODE) {
        case O_RDONLY:
        case O_RDWR:
        case O_WRONLY:
                break;
        default:
                return -EINVAL;
        }

        user = get_current_user();
        if (atomic_read(&user->fanotify_listeners) > FANOTIFY_DEFAULT_MAX_LISTENERS) {
                free_uid(user);
                return -EMFILE;
        }

        f_flags = O_RDWR | FMODE_NONOTIFY;
        if (flags & FAN_CLOEXEC)
                f_flags |= O_CLOEXEC;
        if (flags & FAN_NONBLOCK)
                f_flags |= O_NONBLOCK;

        /* fsnotify_alloc_group takes a ref. Dropped in fanotify_release */
        group = fsnotify_alloc_group(&fanotify_fsnotify_ops);
        if (IS_ERR(group)) {
                free_uid(user);
                return PTR_ERR(group);
        }

        group->fanotify_data.user = user;
        atomic_inc(&user->fanotify_listeners);

        oevent = fanotify_alloc_event(NULL, FS_Q_OVERFLOW, NULL);
        if (unlikely(!oevent)) {
                fd = -ENOMEM;
                goto out_destroy_group;
        }
        group->overflow_event = &oevent->fse;

        if (force_o_largefile())
                event_f_flags |= O_LARGEFILE;
        group->fanotify_data.f_flags = event_f_flags;
        init_waitqueue_head(&group->fanotify_data.access_waitq);
        INIT_LIST_HEAD(&group->fanotify_data.access_list);
        switch (flags & FAN_ALL_CLASS_BITS) {
        case FAN_CLASS_NOTIF:
                group->priority = FS_PRIO_0;
                break;
        case FAN_CLASS_CONTENT:
                group->priority = FS_PRIO_1;
                break;
        case FAN_CLASS_PRE_CONTENT:
                group->priority = FS_PRIO_2;
                break;
        default:
                fd = -EINVAL;
                goto out_destroy_group;
        }

        if (flags & FAN_UNLIMITED_QUEUE) {
                fd = -EPERM;
                if (!capable(CAP_SYS_ADMIN))
                        goto out_destroy_group;
                group->max_events = UINT_MAX;
        } else {
                group->max_events = FANOTIFY_DEFAULT_MAX_EVENTS;
        }

        if (flags & FAN_UNLIMITED_MARKS) {
                fd = -EPERM;
                if (!capable(CAP_SYS_ADMIN))
                        goto out_destroy_group;
                group->fanotify_data.max_marks = UINT_MAX;
        } else {
                group->fanotify_data.max_marks = FANOTIFY_DEFAULT_MAX_MARKS;
        }

        if (flags & FAN_ENABLE_AUDIT) {
                fd = -EPERM;
                if (!capable(CAP_AUDIT_WRITE))
                        goto out_destroy_group;
                group->fanotify_data.audit = true;
        }

        fd = anon_inode_getfd("[fanotify]", &fanotify_fops, group, f_flags);
        if (fd < 0)
                goto out_destroy_group;

        return fd;

out_destroy_group:
        fsnotify_destroy_group(group);
        return fd;
}

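/*
 * fanotify_mark() adds, removes or flushes marks on an inode or a whole mount
 * for an existing fanotify group; the mask selects which events the mark
 * reports (or ignores, with FAN_MARK_IGNORED_MASK).
 */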
SYSCALL_DEFINE5(fanotify_mark, int, fanotify_fd, unsigned int, flags,
                              __u64, mask, int, dfd,
                              const char __user *, pathname)
{
        struct inode *inode = NULL;
        struct vfsmount *mnt = NULL;
        struct fsnotify_group *group;
        struct fd f;
        struct path path;
        u32 valid_mask = FAN_ALL_EVENTS | FAN_EVENT_ON_CHILD;
        int ret;

        pr_debug("%s: fanotify_fd=%d flags=%x dfd=%d pathname=%p mask=%llx\n",
                 __func__, fanotify_fd, flags, dfd, pathname, mask);

        /* we only use the lower 32 bits as of right now. */
        if (mask & ((__u64)0xffffffff << 32))
                return -EINVAL;

        if (flags & ~FAN_ALL_MARK_FLAGS)
                return -EINVAL;
        switch (flags & (FAN_MARK_ADD | FAN_MARK_REMOVE | FAN_MARK_FLUSH)) {
        case FAN_MARK_ADD:              /* fallthrough */
        case FAN_MARK_REMOVE:
                if (!mask)
                        return -EINVAL;
                break;
        case FAN_MARK_FLUSH:
                if (flags & ~(FAN_MARK_MOUNT | FAN_MARK_FLUSH))
                        return -EINVAL;
                break;
        default:
                return -EINVAL;
        }

        if (mask & FAN_ONDIR) {
                flags |= FAN_MARK_ONDIR;
                mask &= ~FAN_ONDIR;
        }

        if (IS_ENABLED(CONFIG_FANOTIFY_ACCESS_PERMISSIONS))
                valid_mask |= FAN_ALL_PERM_EVENTS;

        if (mask & ~valid_mask)
                return -EINVAL;

        f = fdget(fanotify_fd);
        if (unlikely(!f.file))
                return -EBADF;

        /* verify that this is indeed an fanotify instance */
        ret = -EINVAL;
        if (unlikely(f.file->f_op != &fanotify_fops))
                goto fput_and_out;
        group = f.file->private_data;

        /*
         * group->priority == FS_PRIO_0 == FAN_CLASS_NOTIF. These are not
         * allowed to set permissions events.
         */
        ret = -EINVAL;
        if (mask & FAN_ALL_PERM_EVENTS &&
            group->priority == FS_PRIO_0)
                goto fput_and_out;

        if (flags & FAN_MARK_FLUSH) {
                ret = 0;
                if (flags & FAN_MARK_MOUNT)
                        fsnotify_clear_vfsmount_marks_by_group(group);
                else
                        fsnotify_clear_inode_marks_by_group(group);
                goto fput_and_out;
        }

        ret = fanotify_find_path(dfd, pathname, &path, flags);
        if (ret)
                goto fput_and_out;

        /* inode held in place by reference to path; group by fget on fd */
        if (!(flags & FAN_MARK_MOUNT))
                inode = path.dentry->d_inode;
        else
                mnt = path.mnt;

        /* create/update an inode mark */
        switch (flags & (FAN_MARK_ADD | FAN_MARK_REMOVE)) {
        case FAN_MARK_ADD:
                if (flags & FAN_MARK_MOUNT)
                        ret = fanotify_add_vfsmount_mark(group, mnt, mask, flags);
                else
                        ret = fanotify_add_inode_mark(group, inode, mask, flags);
                break;
        case FAN_MARK_REMOVE:
                if (flags & FAN_MARK_MOUNT)
                        ret = fanotify_remove_vfsmount_mark(group, mnt, mask, flags);
                else
                        ret = fanotify_remove_inode_mark(group, inode, mask, flags);
                break;
        default:
                ret = -EINVAL;
        }

        path_put(&path);
fput_and_out:
        fdput(f);
        return ret;
}

#ifdef CONFIG_COMPAT
COMPAT_SYSCALL_DEFINE6(fanotify_mark,
                                int, fanotify_fd, unsigned int, flags,
                                __u32, mask0, __u32, mask1, int, dfd,
                                const char __user *, pathname)
{
        return sys_fanotify_mark(fanotify_fd, flags,
#ifdef __BIG_ENDIAN
                                ((__u64)mask0 << 32) | mask1,
#else
                                ((__u64)mask1 << 32) | mask0,
#endif
                                dfd, pathname);
}
#endif

/*
 * fanotify_user_setup - Our initialization function. Note that we cannot return
 * error because we have compiled-in VFS hooks. So an (unlikely) failure here
 * must result in panic().
 */
static int __init fanotify_user_setup(void)
{
        fanotify_mark_cache = KMEM_CACHE(fsnotify_mark, SLAB_PANIC);
        fanotify_event_cachep = KMEM_CACHE(fanotify_event_info, SLAB_PANIC);
        if (IS_ENABLED(CONFIG_FANOTIFY_ACCESS_PERMISSIONS)) {
                fanotify_perm_event_cachep =
                        KMEM_CACHE(fanotify_perm_event_info, SLAB_PANIC);
        }

        return 0;
}
device_initcall(fanotify_user_setup);