/* linux-2.6/next.git: fs/notify/fanotify/fanotify_user.c */
#include <linux/fanotify.h>
#include <linux/fcntl.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/anon_inodes.h>
#include <linux/fsnotify_backend.h>
#include <linux/init.h>
#include <linux/mount.h>
#include <linux/namei.h>
#include <linux/poll.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/uaccess.h>

#include <asm/ioctls.h>
#define FANOTIFY_DEFAULT_MAX_EVENTS	16384
#define FANOTIFY_DEFAULT_MAX_MARKS	8192
#define FANOTIFY_DEFAULT_MAX_LISTENERS	128
extern const struct fsnotify_ops fanotify_fsnotify_ops;
static struct kmem_cache *fanotify_mark_cache __read_mostly;
static struct kmem_cache *fanotify_response_event_cache __read_mostly;
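
/*
 * A fanotify_response_event tracks a permission event that has been
 * handed to userspace: the fd we gave the listener and the event that
 * the listener must answer via write().
 */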
struct fanotify_response_event {
	struct list_head list;
	__s32 fd;
	struct fsnotify_event *event;
};
/*
 * Get an fsnotify notification event if one exists and is small
 * enough to fit in "count". Return an error pointer if the count
 * is not large enough.
 *
 * Called with the group->notification_mutex held.
 */
static struct fsnotify_event *get_one_event(struct fsnotify_group *group,
					    size_t count)
{
	BUG_ON(!mutex_is_locked(&group->notification_mutex));

	pr_debug("%s: group=%p count=%zd\n", __func__, group, count);

	if (fsnotify_notify_queue_is_empty(group))
		return NULL;

	if (FAN_EVENT_METADATA_LEN > count)
		return ERR_PTR(-EINVAL);

	/* held the notification_mutex the whole time, so this is the
	 * same event we peeked above */
	return fsnotify_remove_notify_event(group);
}
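
/*
 * Open a new struct file against the path carried by @event and install
 * it in the listener's fd table; the returned fd (or -errno) is what
 * ends up in fanotify_event_metadata.fd.
 */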
static int create_fd(struct fsnotify_group *group, struct fsnotify_event *event)
{
	int client_fd;
	struct dentry *dentry;
	struct vfsmount *mnt;
	struct file *new_file;
	unsigned int flags;

	pr_debug("%s: group=%p event=%p\n", __func__, group, event);

	client_fd = get_unused_fd();
	if (client_fd < 0)
		return client_fd;

	if (event->data_type != FSNOTIFY_EVENT_PATH) {
		WARN_ON(1);
		put_unused_fd(client_fd);
		return -EINVAL;
	}

	/*
	 * we need a new file handle for the userspace program so it can read
	 * even if it was originally opened O_WRONLY.
	 */
	dentry = dget(event->path.dentry);
	mnt = mntget(event->path.mnt);
	/* it's possible this event was an overflow event.  in that case dentry
	 * and mnt are NULL;  That's fine, just don't call dentry open */
	if (dentry && mnt) {
		flags = group->fanotify_data.f_flags;
		new_file = dentry_open(dentry, mnt, flags, current_cred());
		/*
		 * Attempt fallback to read-only access if writable was not
		 * possible in order to at least provide something to the
		 * listener.
		 */
		if (IS_ERR(new_file) && group->fanotify_data.readonly_fallback) {
			flags &= ~O_ACCMODE;
			flags |= O_RDONLY;
			new_file = dentry_open(dentry, mnt, flags,
					       current_cred());
		}
	} else {
		new_file = ERR_PTR(-EOVERFLOW);
	}
	if (IS_ERR(new_file)) {
		/*
		 * we still send an event even if we can't open the file.  this
		 * can happen when say tasks are gone and we try to open their
		 * /proc files or we try to open a WRONLY file like in sysfs.
		 * we just send the errno to userspace since there isn't much
		 * else we can do.
		 */
		put_unused_fd(client_fd);
		client_fd = PTR_ERR(new_file);
	} else {
		fd_install(client_fd, new_file);
	}

	return client_fd;
}
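
/*
 * Fill in the fixed-size fanotify_event_metadata record that read()
 * hands to userspace; creating the file descriptor is the only part
 * that can fail.
 */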
static int fill_event_metadata(struct fsnotify_group *group,
			       struct fanotify_event_metadata *metadata,
			       struct fsnotify_event *event)
{
	int ret = 0;

	pr_debug("%s: group=%p metadata=%p event=%p\n", __func__,
		 group, metadata, event);

	metadata->event_len = FAN_EVENT_METADATA_LEN;
	metadata->metadata_len = FAN_EVENT_METADATA_LEN;
	metadata->vers = FANOTIFY_METADATA_VERSION;
	metadata->mask = event->mask & FAN_ALL_OUTGOING_EVENTS;
	metadata->pid = pid_vnr(event->tgid);
	if (unlikely(event->mask & FAN_Q_OVERFLOW))
		metadata->fd = FAN_NOFD;
	else {
		metadata->fd = create_fd(group, event);
		if (metadata->fd < 0)
			ret = metadata->fd;
	}

	return ret;
}
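
/*
 * Permission events (FAN_ALL_PERM_EVENTS) are parked on
 * group->fanotify_data.access_list until userspace writes a
 * struct fanotify_response (fd + FAN_ALLOW/FAN_DENY) back to us;
 * dequeue_re() looks an entry up by fd and unlinks it.
 */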
#ifdef CONFIG_FANOTIFY_ACCESS_PERMISSIONS
static struct fanotify_response_event *dequeue_re(struct fsnotify_group *group,
						  __s32 fd)
{
	struct fanotify_response_event *re, *return_re = NULL;

	mutex_lock(&group->fanotify_data.access_mutex);
	list_for_each_entry(re, &group->fanotify_data.access_list, list) {
		if (re->fd != fd)
			continue;

		list_del_init(&re->list);
		return_re = re;
		break;
	}
	mutex_unlock(&group->fanotify_data.access_mutex);

	pr_debug("%s: found return_re=%p\n", __func__, return_re);

	return return_re;
}
static int process_access_response(struct fsnotify_group *group,
				   struct fanotify_response *response_struct)
{
	struct fanotify_response_event *re;
	__s32 fd = response_struct->fd;
	__u32 response = response_struct->response;

	pr_debug("%s: group=%p fd=%d response=%d\n", __func__, group,
		 fd, response);
	/*
	 * make sure the response is valid, if invalid we do nothing and either
	 * userspace can send a valid response or we will clean it up after the
	 * timeout
	 */
	switch (response) {
	case FAN_ALLOW:
	case FAN_DENY:
		break;
	default:
		return -EINVAL;
	}

	if (fd < 0)
		return -EINVAL;

	re = dequeue_re(group, fd);
	if (!re)
		return -ENOENT;

	re->event->response = response;

	wake_up(&group->fanotify_data.access_waitq);

	kmem_cache_free(fanotify_response_event_cache, re);

	return 0;
}
static int prepare_for_access_response(struct fsnotify_group *group,
				       struct fsnotify_event *event,
				       __s32 fd)
{
	struct fanotify_response_event *re;

	if (!(event->mask & FAN_ALL_PERM_EVENTS))
		return 0;

	re = kmem_cache_alloc(fanotify_response_event_cache, GFP_KERNEL);
	if (!re)
		return -ENOMEM;

	re->event = event;
	re->fd = fd;

	mutex_lock(&group->fanotify_data.access_mutex);
	list_add_tail(&re->list, &group->fanotify_data.access_list);
	mutex_unlock(&group->fanotify_data.access_mutex);

	return 0;
}
static void remove_access_response(struct fsnotify_group *group,
				   struct fsnotify_event *event,
				   __s32 fd)
{
	struct fanotify_response_event *re;

	if (!(event->mask & FAN_ALL_PERM_EVENTS))
		return;

	re = dequeue_re(group, fd);
	if (!re)
		return;

	BUG_ON(re->event != event);

	kmem_cache_free(fanotify_response_event_cache, re);

	return;
}
#else
static int prepare_for_access_response(struct fsnotify_group *group,
				       struct fsnotify_event *event,
				       __s32 fd)
{
	return 0;
}

static void remove_access_response(struct fsnotify_group *group,
				   struct fsnotify_event *event,
				   __s32 fd)
{
	return;
}
#endif
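
/*
 * Copy one event to the user buffer.  On any failure after a permission
 * event has been queued we fail safe: answer it FAN_DENY and wake the
 * waiter.
 */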
static ssize_t copy_event_to_user(struct fsnotify_group *group,
				  struct fsnotify_event *event,
				  char __user *buf)
{
	struct fanotify_event_metadata fanotify_event_metadata;
	int fd, ret;

	pr_debug("%s: group=%p event=%p\n", __func__, group, event);

	ret = fill_event_metadata(group, &fanotify_event_metadata, event);
	if (ret < 0)
		goto out;

	fd = fanotify_event_metadata.fd;
	ret = prepare_for_access_response(group, event, fd);
	if (ret)
		goto out_close_fd;

	ret = -EFAULT;
	if (copy_to_user(buf, &fanotify_event_metadata,
			 fanotify_event_metadata.event_len))
		goto out_kill_access_response;

	return fanotify_event_metadata.event_len;

out_kill_access_response:
	remove_access_response(group, event, fd);
out_close_fd:
	if (fd != FAN_NOFD)
		sys_close(fd);
out:
#ifdef CONFIG_FANOTIFY_ACCESS_PERMISSIONS
	if (event->mask & FAN_ALL_PERM_EVENTS) {
		event->response = FAN_DENY;
		wake_up(&group->fanotify_data.access_waitq);
	}
#endif
	return ret;
}
/* fanotify userspace file descriptor functions */
static unsigned int fanotify_poll(struct file *file, poll_table *wait)
{
	struct fsnotify_group *group = file->private_data;
	int ret = 0;

	poll_wait(file, &group->notification_waitq, wait);
	mutex_lock(&group->notification_mutex);
	if (!fsnotify_notify_queue_is_empty(group))
		ret = POLLIN | POLLRDNORM;
	mutex_unlock(&group->notification_mutex);

	return ret;
}
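
/*
 * Classic blocking-read loop: copy out as many queued events as fit in
 * the buffer, otherwise sleep on notification_waitq unless O_NONBLOCK
 * or a pending signal gets there first.
 */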
static ssize_t fanotify_read(struct file *file, char __user *buf,
			     size_t count, loff_t *pos)
{
	struct fsnotify_group *group;
	struct fsnotify_event *kevent;
	char __user *start;
	int ret;
	DEFINE_WAIT(wait);

	start = buf;
	group = file->private_data;

	pr_debug("%s: group=%p\n", __func__, group);

	while (1) {
		prepare_to_wait(&group->notification_waitq, &wait, TASK_INTERRUPTIBLE);

		mutex_lock(&group->notification_mutex);
		kevent = get_one_event(group, count);
		mutex_unlock(&group->notification_mutex);

		if (kevent) {
			ret = PTR_ERR(kevent);
			if (IS_ERR(kevent))
				break;
			ret = copy_event_to_user(group, kevent, buf);
			fsnotify_put_event(kevent);
			if (ret < 0)
				break;
			buf += ret;
			count -= ret;
			continue;
		}

		ret = -EAGAIN;
		if (file->f_flags & O_NONBLOCK)
			break;
		ret = -ERESTARTSYS;
		if (signal_pending(current))
			break;

		if (start != buf)
			break;

		schedule();
	}

	finish_wait(&group->notification_waitq, &wait);
	if (start != buf && ret != -EFAULT)
		ret = buf - start;
	return ret;
}
static ssize_t fanotify_write(struct file *file, const char __user *buf, size_t count, loff_t *pos)
{
#ifdef CONFIG_FANOTIFY_ACCESS_PERMISSIONS
	struct fanotify_response response = { .fd = -1, .response = -1 };
	struct fsnotify_group *group;
	int ret;

	group = file->private_data;

	if (count > sizeof(response))
		count = sizeof(response);

	pr_debug("%s: group=%p count=%zu\n", __func__, group, count);

	if (copy_from_user(&response, buf, count))
		return -EFAULT;

	ret = process_access_response(group, &response);
	if (ret < 0)
		count = ret;

	return count;
#else
	return -EINVAL;
#endif
}
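
/*
 * On final close we must not leave permission-event waiters blocked:
 * bump bypass_perm so new events don't block, answer everything still
 * on the access_list with FAN_ALLOW, and wake the queue.
 */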
static int fanotify_release(struct inode *ignored, struct file *file)
{
	struct fsnotify_group *group = file->private_data;

#ifdef CONFIG_FANOTIFY_ACCESS_PERMISSIONS
	struct fanotify_response_event *re, *lre;

	mutex_lock(&group->fanotify_data.access_mutex);

	atomic_inc(&group->fanotify_data.bypass_perm);

	list_for_each_entry_safe(re, lre, &group->fanotify_data.access_list, list) {
		pr_debug("%s: found group=%p re=%p event=%p\n", __func__, group,
			 re, re->event);

		list_del_init(&re->list);
		re->event->response = FAN_ALLOW;

		kmem_cache_free(fanotify_response_event_cache, re);
	}
	mutex_unlock(&group->fanotify_data.access_mutex);

	wake_up(&group->fanotify_data.access_waitq);
#endif
	/* matches the fanotify_init->fsnotify_alloc_group */
	fsnotify_put_group(group);

	return 0;
}
static long fanotify_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	struct fsnotify_group *group;
	struct fsnotify_event_holder *holder;
	void __user *p;
	int ret = -ENOTTY;
	size_t send_len = 0;

	group = file->private_data;

	p = (void __user *) arg;

	switch (cmd) {
	case FIONREAD:
		mutex_lock(&group->notification_mutex);
		list_for_each_entry(holder, &group->notification_list, event_list)
			send_len += FAN_EVENT_METADATA_LEN;
		mutex_unlock(&group->notification_mutex);
		ret = put_user(send_len, (int __user *) p);
		break;
	}

	return ret;
}
static const struct file_operations fanotify_fops = {
	.poll		= fanotify_poll,
	.read		= fanotify_read,
	.write		= fanotify_write,
	.fasync		= NULL,
	.release	= fanotify_release,
	.unlocked_ioctl	= fanotify_ioctl,
	.compat_ioctl	= fanotify_ioctl,
	.llseek		= noop_llseek,
};
static void fanotify_free_mark(struct fsnotify_mark *fsn_mark)
{
	kmem_cache_free(fanotify_mark_cache, fsn_mark);
}
static int fanotify_find_path(int dfd, const char __user *filename,
			      struct path *path, unsigned int flags)
{
	int ret;

	pr_debug("%s: dfd=%d filename=%p flags=%x\n", __func__,
		 dfd, filename, flags);

	if (filename == NULL) {
		struct file *file;
		int fput_needed;

		ret = -EBADF;
		file = fget_light(dfd, &fput_needed);
		if (!file)
			goto out;

		ret = -ENOTDIR;
		if ((flags & FAN_MARK_ONLYDIR) &&
		    !(S_ISDIR(file->f_path.dentry->d_inode->i_mode))) {
			fput_light(file, fput_needed);
			goto out;
		}

		*path = file->f_path;
		path_get(path);
		fput_light(file, fput_needed);
	} else {
		unsigned int lookup_flags = 0;

		if (!(flags & FAN_MARK_DONT_FOLLOW))
			lookup_flags |= LOOKUP_FOLLOW;
		if (flags & FAN_MARK_ONLYDIR)
			lookup_flags |= LOOKUP_DIRECTORY;

		ret = user_path_at(dfd, filename, lookup_flags, path);
		if (ret)
			goto out;
	}

	/* you can only watch an inode if you have read permissions on it */
	ret = inode_permission(path->dentry->d_inode, MAY_READ);
	if (ret)
		path_put(path);
out:
	return ret;
}
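
/*
 * The mask add/remove helpers below return the bits that actually
 * changed, so callers only recalculate the object's aggregate
 * fsnotify mask when something they care about moved.
 */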
static __u32 fanotify_mark_remove_from_mask(struct fsnotify_mark *fsn_mark,
					    __u32 mask,
					    unsigned int flags)
{
	__u32 oldmask;
	int destroy_mark;

	spin_lock(&fsn_mark->lock);
	if (!(flags & FAN_MARK_IGNORED_MASK)) {
		oldmask = fsn_mark->mask;
		fsnotify_set_mark_mask_locked(fsn_mark, (oldmask & ~mask));
	} else {
		oldmask = fsn_mark->ignored_mask;
		fsnotify_set_mark_ignored_mask_locked(fsn_mark, (oldmask & ~mask));
	}
	destroy_mark = (!fsn_mark->mask && !fsn_mark->ignored_mask);
	spin_unlock(&fsn_mark->lock);

	if (destroy_mark)
		fsnotify_destroy_mark(fsn_mark);

	return mask & oldmask;
}
static int fanotify_remove_vfsmount_mark(struct fsnotify_group *group,
					 struct vfsmount *mnt, __u32 mask,
					 unsigned int flags)
{
	struct fsnotify_mark *fsn_mark = NULL;
	__u32 removed;
	int ret;

	mutex_lock(&group->mutex);
	ret = -ENOENT;
	fsn_mark = fsnotify_find_vfsmount_mark(group, mnt);
	if (!fsn_mark)
		goto err;

	removed = fanotify_mark_remove_from_mask(fsn_mark, mask, flags);
	fsnotify_put_mark(fsn_mark);
	if (removed & mnt->mnt_fsnotify_mask)
		fsnotify_recalc_vfsmount_mask(mnt);
	ret = 0;
err:
	mutex_unlock(&group->mutex);

	return ret;
}
static int fanotify_remove_inode_mark(struct fsnotify_group *group,
				      struct inode *inode, __u32 mask,
				      unsigned int flags)
{
	struct fsnotify_mark *fsn_mark = NULL;
	__u32 removed;
	int ret;

	mutex_lock(&group->mutex);
	ret = -ENOENT;
	fsn_mark = fsnotify_find_inode_mark(group, inode);
	if (!fsn_mark)
		goto err;

	removed = fanotify_mark_remove_from_mask(fsn_mark, mask, flags);
	/* matches the fsnotify_find_inode_mark() */
	fsnotify_put_mark(fsn_mark);
	if (removed & inode->i_fsnotify_mask)
		fsnotify_recalc_inode_mask(inode);
	ret = 0;
err:
	mutex_unlock(&group->mutex);

	return ret;
}
static __u32 fanotify_mark_add_to_mask(struct fsnotify_mark *fsn_mark,
				       __u32 mask,
				       unsigned int flags)
{
	__u32 oldmask = -1;

	spin_lock(&fsn_mark->lock);
	if (!(flags & FAN_MARK_IGNORED_MASK)) {
		oldmask = fsn_mark->mask;
		fsnotify_set_mark_mask_locked(fsn_mark, (oldmask | mask));
	} else {
		__u32 tmask = fsn_mark->ignored_mask | mask;
		fsnotify_set_mark_ignored_mask_locked(fsn_mark, tmask);
		if (flags & FAN_MARK_IGNORED_SURV_MODIFY)
			fsn_mark->flags |= FSNOTIFY_MARK_FLAG_IGNORED_SURV_MODIFY;
	}

	if (!(flags & FAN_MARK_ONDIR)) {
		__u32 tmask = fsn_mark->ignored_mask | FAN_ONDIR;
		fsnotify_set_mark_ignored_mask_locked(fsn_mark, tmask);
	}

	spin_unlock(&fsn_mark->lock);

	return mask & ~oldmask;
}
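
/*
 * Both add paths share one pattern: find the group's existing mark on
 * the object or allocate a fresh one (respecting max_marks), then OR
 * the new bits into it.
 */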
static int fanotify_add_vfsmount_mark(struct fsnotify_group *group,
				      struct vfsmount *mnt, __u32 mask,
				      unsigned int flags)
{
	struct fsnotify_mark *fsn_mark;
	__u32 added;
	int ret;

	mutex_lock(&group->mutex);
	fsn_mark = fsnotify_find_vfsmount_mark(group, mnt);
	if (!fsn_mark) {
		ret = -ENOSPC;
		if (atomic_read(&group->num_marks) >
		    group->fanotify_data.max_marks)
			goto err;

		ret = -ENOMEM;
		fsn_mark = kmem_cache_alloc(fanotify_mark_cache, GFP_KERNEL);
		if (!fsn_mark)
			goto err;

		fsnotify_init_mark(fsn_mark, fanotify_free_mark);
		ret = fsnotify_add_mark(fsn_mark, group, NULL, mnt, 0);
		if (ret)
			goto err2;
	}
	added = fanotify_mark_add_to_mask(fsn_mark, mask, flags);

	if (added & ~mnt->mnt_fsnotify_mask)
		fsnotify_recalc_vfsmount_mask(mnt);
	ret = 0;
err2:
	fsnotify_put_mark(fsn_mark);
err:
	mutex_unlock(&group->mutex);
	return ret;
}
static int fanotify_add_inode_mark(struct fsnotify_group *group,
				   struct inode *inode, __u32 mask,
				   unsigned int flags)
{
	struct fsnotify_mark *fsn_mark;
	__u32 added;
	int ret;

	pr_debug("%s: group=%p inode=%p\n", __func__, group, inode);

	/*
	 * If some other task has this inode open for write we should not add
	 * an ignored mark, unless that ignored mark is supposed to survive
	 * modification changes anyway.
	 */
	if ((flags & FAN_MARK_IGNORED_MASK) &&
	    !(flags & FAN_MARK_IGNORED_SURV_MODIFY) &&
	    (atomic_read(&inode->i_writecount) > 0))
		return 0;

	mutex_lock(&group->mutex);
	fsn_mark = fsnotify_find_inode_mark(group, inode);
	if (!fsn_mark) {
		ret = -ENOSPC;
		if (atomic_read(&group->num_marks) >
		    group->fanotify_data.max_marks)
			goto err;

		ret = -ENOMEM;
		fsn_mark = kmem_cache_alloc(fanotify_mark_cache, GFP_KERNEL);
		if (!fsn_mark)
			goto err;

		fsnotify_init_mark(fsn_mark, fanotify_free_mark);
		ret = fsnotify_add_mark(fsn_mark, group, inode, NULL, 0);
		if (ret)
			goto err2;
	}
	added = fanotify_mark_add_to_mask(fsn_mark, mask, flags);

	if (added & ~inode->i_fsnotify_mask)
		fsnotify_recalc_inode_mask(inode);
	ret = 0;
err2:
	fsnotify_put_mark(fsn_mark);
err:
	mutex_unlock(&group->mutex);
	return ret;
}
/* fanotify syscalls */
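/*
 * Example (userspace sketch, not part of this file): a listener might do
 *
 *	int fd = fanotify_init(FAN_CLOEXEC | FAN_CLASS_CONTENT, O_RDONLY);
 *	fanotify_mark(fd, FAN_MARK_ADD | FAN_MARK_MOUNT,
 *		      FAN_OPEN | FAN_CLOSE, AT_FDCWD, "/");
 *
 * and then read() struct fanotify_event_metadata records from fd.
 */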
SYSCALL_DEFINE2(fanotify_init, unsigned int, flags, unsigned int, event_f_flags)
{
	struct fsnotify_group *group;
	int f_flags, fd;
	struct user_struct *user;

	pr_debug("%s: flags=%d event_f_flags=%d\n",
		 __func__, flags, event_f_flags);

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (flags & ~FAN_ALL_INIT_FLAGS)
		return -EINVAL;

	user = get_current_user();
	if (atomic_read(&user->fanotify_listeners) > FANOTIFY_DEFAULT_MAX_LISTENERS) {
		free_uid(user);
		return -EMFILE;
	}

	f_flags = O_RDWR | FMODE_NONOTIFY;
	if (flags & FAN_CLOEXEC)
		f_flags |= O_CLOEXEC;
	if (flags & FAN_NONBLOCK)
		f_flags |= O_NONBLOCK;

	/* fsnotify_alloc_group takes a ref.  Dropped in fanotify_release */
	group = fsnotify_alloc_group(&fanotify_fsnotify_ops);
	if (IS_ERR(group)) {
		free_uid(user);
		return PTR_ERR(group);
	}

	group->fanotify_data.user = user;
	atomic_inc(&user->fanotify_listeners);

	group->fanotify_data.f_flags = event_f_flags | FMODE_NONOTIFY;
#ifdef CONFIG_FANOTIFY_ACCESS_PERMISSIONS
	mutex_init(&group->fanotify_data.access_mutex);
	init_waitqueue_head(&group->fanotify_data.access_waitq);
	INIT_LIST_HEAD(&group->fanotify_data.access_list);
	atomic_set(&group->fanotify_data.bypass_perm, 0);
#endif
	switch (flags & FAN_ALL_CLASS_BITS) {
	case FAN_CLASS_NOTIF:
		group->priority = FS_PRIO_0;
		break;
	case FAN_CLASS_CONTENT:
		group->priority = FS_PRIO_1;
		break;
	case FAN_CLASS_PRE_CONTENT:
		group->priority = FS_PRIO_2;
		break;
	default:
		fd = -EINVAL;
		goto out_put_group;
	}

	if (flags & FAN_UNLIMITED_QUEUE) {
		fd = -EPERM;
		if (!capable(CAP_SYS_ADMIN))
			goto out_put_group;
		group->max_events = UINT_MAX;
	} else {
		group->max_events = FANOTIFY_DEFAULT_MAX_EVENTS;
	}

	if (flags & FAN_UNLIMITED_MARKS) {
		fd = -EPERM;
		if (!capable(CAP_SYS_ADMIN))
			goto out_put_group;
		group->fanotify_data.max_marks = UINT_MAX;
	} else {
		group->fanotify_data.max_marks = FANOTIFY_DEFAULT_MAX_MARKS;
	}

	fd = -EINVAL;
	if (flags & FAN_READONLY_FALLBACK) {
		if ((event_f_flags & O_ACCMODE) == O_RDWR)
			group->fanotify_data.readonly_fallback = true;
		else
			goto out_put_group;
	}

	fd = anon_inode_getfd("[fanotify]", &fanotify_fops, group, f_flags);
	if (fd < 0)
		goto out_put_group;

	return fd;

out_put_group:
	fsnotify_put_group(group);
	return fd;
}
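
/*
 * fanotify_mark(2): validate flags/mask, resolve dfd+pathname to a
 * struct path, then add/remove/flush marks on either the inode or the
 * whole mount.
 */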
SYSCALL_DEFINE(fanotify_mark)(int fanotify_fd, unsigned int flags,
			      __u64 mask, int dfd,
			      const char __user * pathname)
{
	struct inode *inode = NULL;
	struct vfsmount *mnt = NULL;
	struct fsnotify_group *group;
	struct file *filp;
	struct path path;
	int ret, fput_needed;

	pr_debug("%s: fanotify_fd=%d flags=%x dfd=%d pathname=%p mask=%llx\n",
		 __func__, fanotify_fd, flags, dfd, pathname, mask);

	/* we only use the lower 32 bits as of right now. */
	if (mask & ((__u64)0xffffffff << 32))
		return -EINVAL;

	if (flags & ~FAN_ALL_MARK_FLAGS)
		return -EINVAL;
	switch (flags & (FAN_MARK_ADD | FAN_MARK_REMOVE | FAN_MARK_FLUSH)) {
	case FAN_MARK_ADD:		/* fallthrough */
	case FAN_MARK_REMOVE:
		if (!mask)
			return -EINVAL;
	case FAN_MARK_FLUSH:
		break;
	default:
		return -EINVAL;
	}

	if (mask & FAN_ONDIR) {
		flags |= FAN_MARK_ONDIR;
		mask &= ~FAN_ONDIR;
	}

#ifdef CONFIG_FANOTIFY_ACCESS_PERMISSIONS
	if (mask & ~(FAN_ALL_EVENTS | FAN_ALL_PERM_EVENTS | FAN_EVENT_ON_CHILD))
#else
	if (mask & ~(FAN_ALL_EVENTS | FAN_EVENT_ON_CHILD))
#endif
		return -EINVAL;

	filp = fget_light(fanotify_fd, &fput_needed);
	if (unlikely(!filp))
		return -EBADF;

	/* verify that this is indeed an fanotify instance */
	ret = -EINVAL;
	if (unlikely(filp->f_op != &fanotify_fops))
		goto fput_and_out;
	group = filp->private_data;

	/*
	 * group->priority == FS_PRIO_0 == FAN_CLASS_NOTIF.  These are not
	 * allowed to set permissions events.
	 */
	ret = -EINVAL;
	if (mask & FAN_ALL_PERM_EVENTS &&
	    group->priority == FS_PRIO_0)
		goto fput_and_out;

	ret = fanotify_find_path(dfd, pathname, &path, flags);
	if (ret)
		goto fput_and_out;

	/* inode held in place by reference to path; group by fget on fd */
	if (!(flags & FAN_MARK_MOUNT))
		inode = path.dentry->d_inode;
	else
		mnt = path.mnt;

	/* create/update an inode mark */
	switch (flags & (FAN_MARK_ADD | FAN_MARK_REMOVE | FAN_MARK_FLUSH)) {
	case FAN_MARK_ADD:
		if (flags & FAN_MARK_MOUNT)
			ret = fanotify_add_vfsmount_mark(group, mnt, mask, flags);
		else
			ret = fanotify_add_inode_mark(group, inode, mask, flags);
		break;
	case FAN_MARK_REMOVE:
		if (flags & FAN_MARK_MOUNT)
			ret = fanotify_remove_vfsmount_mark(group, mnt, mask, flags);
		else
			ret = fanotify_remove_inode_mark(group, inode, mask, flags);
		break;
	case FAN_MARK_FLUSH:
		if (flags & FAN_MARK_MOUNT)
			fsnotify_clear_vfsmount_marks_by_group(group);
		else
			fsnotify_clear_inode_marks_by_group(group);
		break;
	default:
		ret = -EINVAL;
	}

	path_put(&path);
fput_and_out:
	fput_light(filp, fput_needed);
	return ret;
}
#ifdef CONFIG_HAVE_SYSCALL_WRAPPERS
asmlinkage long SyS_fanotify_mark(long fanotify_fd, long flags, __u64 mask,
				  long dfd, long pathname)
{
	return SYSC_fanotify_mark((int) fanotify_fd, (unsigned int) flags,
				  mask, (int) dfd,
				  (const char __user *) pathname);
}
SYSCALL_ALIAS(sys_fanotify_mark, SyS_fanotify_mark);
#endif
/*
 * fanotify_user_setup - Our initialization function.  Note that we cannot return
 * error because we have compiled-in VFS hooks.  So an (unlikely) failure here
 * must result in panic().
 */
static int __init fanotify_user_setup(void)
{
	fanotify_mark_cache = KMEM_CACHE(fsnotify_mark, SLAB_PANIC);
	fanotify_response_event_cache = KMEM_CACHE(fanotify_response_event,
						   SLAB_PANIC);

	return 0;
}
device_initcall(fanotify_user_setup);