#include <linux/fanotify.h>
#include <linux/fcntl.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/anon_inodes.h>
#include <linux/fsnotify_backend.h>
#include <linux/init.h>
#include <linux/mount.h>
#include <linux/namei.h>
#include <linux/poll.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/uaccess.h>
#include <linux/compat.h>

#include <asm/ioctls.h>

#include "../../mount.h"
#include "../fdinfo.h"

#define FANOTIFY_DEFAULT_MAX_EVENTS	16384
#define FANOTIFY_DEFAULT_MAX_MARKS	8192
#define FANOTIFY_DEFAULT_MAX_LISTENERS	128
extern const struct fsnotify_ops fanotify_fsnotify_ops;

static struct kmem_cache *fanotify_mark_cache __read_mostly;
static struct kmem_cache *fanotify_response_event_cache __read_mostly;
struct fanotify_response_event {
	struct list_head list;
	__s32 fd;
	struct fsnotify_event *event;
};
/*
 * Get an fsnotify notification event if one exists and is small
 * enough to fit in "count". Return an error pointer if the count
 * is not large enough.
 *
 * Called with the group->notification_mutex held.
 */
static struct fsnotify_event *get_one_event(struct fsnotify_group *group,
					    size_t count)
{
	BUG_ON(!mutex_is_locked(&group->notification_mutex));

	pr_debug("%s: group=%p count=%zd\n", __func__, group, count);

	if (fsnotify_notify_queue_is_empty(group))
		return NULL;

	if (FAN_EVENT_METADATA_LEN > count)
		return ERR_PTR(-EINVAL);

	/* held the notification_mutex the whole time, so this is the
	 * same event we peeked above */
	return fsnotify_remove_notify_event(group);
}
static int create_fd(struct fsnotify_group *group,
		     struct fsnotify_event *event,
		     struct file **file)
{
	int client_fd;
	struct file *new_file;

	pr_debug("%s: group=%p event=%p\n", __func__, group, event);

	client_fd = get_unused_fd_flags(group->fanotify_data.f_flags);
	if (client_fd < 0)
		return client_fd;

	if (event->data_type != FSNOTIFY_EVENT_PATH) {
		WARN_ON(1);
		put_unused_fd(client_fd);
		return -EINVAL;
	}

	/*
	 * we need a new file handle for the userspace program so it can read
	 * even if it was originally opened O_WRONLY.
	 */
	/* it's possible this event was an overflow event.  in that case dentry and mnt
	 * are NULL;  That's fine, just don't call dentry open */
	if (event->path.dentry && event->path.mnt)
		new_file = dentry_open(&event->path,
				       group->fanotify_data.f_flags | FMODE_NONOTIFY,
				       current_cred());
	else
		new_file = ERR_PTR(-EOVERFLOW);
	if (IS_ERR(new_file)) {
		/*
		 * we still send an event even if we can't open the file.  this
		 * can happen when say tasks are gone and we try to open their
		 * /proc files or we try to open a WRONLY file like in sysfs
		 * we just send the errno to userspace since there isn't much
		 * else we can do.
		 */
		put_unused_fd(client_fd);
		client_fd = PTR_ERR(new_file);
	} else {
		*file = new_file;
	}

	return client_fd;
}
static int fill_event_metadata(struct fsnotify_group *group,
			       struct fanotify_event_metadata *metadata,
			       struct fsnotify_event *event,
			       struct file **file)
{
	int ret = 0;

	pr_debug("%s: group=%p metadata=%p event=%p\n", __func__,
		 group, metadata, event);

	*file = NULL;
	metadata->event_len = FAN_EVENT_METADATA_LEN;
	metadata->metadata_len = FAN_EVENT_METADATA_LEN;
	metadata->vers = FANOTIFY_METADATA_VERSION;
	metadata->reserved = 0;
	metadata->mask = event->mask & FAN_ALL_OUTGOING_EVENTS;
	metadata->pid = pid_vnr(event->tgid);
	if (unlikely(event->mask & FAN_Q_OVERFLOW))
		metadata->fd = FAN_NOFD;
	else {
		metadata->fd = create_fd(group, event, file);
		if (metadata->fd < 0)
			ret = metadata->fd;
	}

	return ret;
}
#ifdef CONFIG_FANOTIFY_ACCESS_PERMISSIONS
static struct fanotify_response_event *dequeue_re(struct fsnotify_group *group,
						  __s32 fd)
{
	struct fanotify_response_event *re, *return_re = NULL;

	mutex_lock(&group->fanotify_data.access_mutex);
	list_for_each_entry(re, &group->fanotify_data.access_list, list) {
		if (re->fd != fd)
			continue;

		list_del_init(&re->list);
		return_re = re;
		break;
	}
	mutex_unlock(&group->fanotify_data.access_mutex);

	pr_debug("%s: found return_re=%p\n", __func__, return_re);

	return return_re;
}

static int process_access_response(struct fsnotify_group *group,
				   struct fanotify_response *response_struct)
{
	struct fanotify_response_event *re;
	__s32 fd = response_struct->fd;
	__u32 response = response_struct->response;

	pr_debug("%s: group=%p fd=%d response=%d\n", __func__, group,
		 fd, response);
	/*
	 * make sure the response is valid, if invalid we do nothing and either
	 * userspace can send a valid response or we will clean it up after the
	 * timeout
	 */
	switch (response) {
	case FAN_ALLOW:
	case FAN_DENY:
		break;
	default:
		return -EINVAL;
	}

	if (fd < 0)
		return -EINVAL;

	re = dequeue_re(group, fd);
	if (!re)
		return -ENOENT;

	re->event->response = response;

	wake_up(&group->fanotify_data.access_waitq);

	kmem_cache_free(fanotify_response_event_cache, re);

	return 0;
}

static int prepare_for_access_response(struct fsnotify_group *group,
				       struct fsnotify_event *event,
				       __s32 fd)
{
	struct fanotify_response_event *re;

	if (!(event->mask & FAN_ALL_PERM_EVENTS))
		return 0;

	re = kmem_cache_alloc(fanotify_response_event_cache, GFP_KERNEL);
	if (!re)
		return -ENOMEM;

	re->event = event;
	re->fd = fd;

	mutex_lock(&group->fanotify_data.access_mutex);

	if (atomic_read(&group->fanotify_data.bypass_perm)) {
		mutex_unlock(&group->fanotify_data.access_mutex);
		kmem_cache_free(fanotify_response_event_cache, re);
		event->response = FAN_ALLOW;
		return 0;
	}

	list_add_tail(&re->list, &group->fanotify_data.access_list);
	mutex_unlock(&group->fanotify_data.access_mutex);

	return 0;
}

#else
static int prepare_for_access_response(struct fsnotify_group *group,
				       struct fsnotify_event *event,
				       __s32 fd)
{
	return 0;
}
#endif
static ssize_t copy_event_to_user(struct fsnotify_group *group,
				  struct fsnotify_event *event,
				  char __user *buf)
{
	struct fanotify_event_metadata fanotify_event_metadata;
	struct file *f;
	int fd, ret;

	pr_debug("%s: group=%p event=%p\n", __func__, group, event);

	ret = fill_event_metadata(group, &fanotify_event_metadata, event, &f);
	if (ret < 0)
		goto out;

	fd = fanotify_event_metadata.fd;
	ret = -EFAULT;
	if (copy_to_user(buf, &fanotify_event_metadata,
			 fanotify_event_metadata.event_len))
		goto out_close_fd;

	ret = prepare_for_access_response(group, event, fd);
	if (ret)
		goto out_close_fd;

	if (fd != FAN_NOFD)
		fd_install(fd, f);
	return fanotify_event_metadata.event_len;

out_close_fd:
	if (fd != FAN_NOFD) {
		put_unused_fd(fd);
		fput(f);
	}
out:
#ifdef CONFIG_FANOTIFY_ACCESS_PERMISSIONS
	if (event->mask & FAN_ALL_PERM_EVENTS) {
		event->response = FAN_DENY;
		wake_up(&group->fanotify_data.access_waitq);
	}
#endif
	return ret;
}
/* fanotify userspace file descriptor functions */
static unsigned int fanotify_poll(struct file *file, poll_table *wait)
{
	struct fsnotify_group *group = file->private_data;
	int ret = 0;

	poll_wait(file, &group->notification_waitq, wait);
	mutex_lock(&group->notification_mutex);
	if (!fsnotify_notify_queue_is_empty(group))
		ret = POLLIN | POLLRDNORM;
	mutex_unlock(&group->notification_mutex);

	return ret;
}
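/*
 * Illustrative sketch (not part of this file): since fanotify_poll() only
 * reports POLLIN | POLLRDNORM while the notification queue is non-empty, a
 * userspace listener can multiplex the fanotify descriptor with poll(2) or
 * epoll(7) like any other readable fd, roughly:
 *
 *	struct pollfd pfd = { .fd = fanotify_fd, .events = POLLIN };
 *	if (poll(&pfd, 1, -1) > 0 && (pfd.revents & POLLIN))
 *		handle_events(fanotify_fd);	// hypothetical helper
 */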
static ssize_t fanotify_read(struct file *file, char __user *buf,
			     size_t count, loff_t *pos)
{
	struct fsnotify_group *group;
	struct fsnotify_event *kevent;
	char __user *start;
	int ret;
	DEFINE_WAIT(wait);

	start = buf;
	group = file->private_data;

	pr_debug("%s: group=%p\n", __func__, group);

	while (1) {
		prepare_to_wait(&group->notification_waitq, &wait, TASK_INTERRUPTIBLE);

		mutex_lock(&group->notification_mutex);
		kevent = get_one_event(group, count);
		mutex_unlock(&group->notification_mutex);

		if (kevent) {
			ret = PTR_ERR(kevent);
			if (IS_ERR(kevent))
				break;
			ret = copy_event_to_user(group, kevent, buf);
			fsnotify_put_event(kevent);
			if (ret < 0)
				break;
			buf += ret;
			count -= ret;
			continue;
		}

		ret = -EAGAIN;
		if (file->f_flags & O_NONBLOCK)
			break;
		ret = -ERESTARTSYS;
		if (signal_pending(current))
			break;

		if (start != buf)
			break;

		schedule();
	}

	finish_wait(&group->notification_waitq, &wait);
	if (start != buf && ret != -EFAULT)
		ret = buf - start;
	return ret;
}
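/*
 * Illustrative sketch (not part of this file): the records returned by
 * fanotify_read() are struct fanotify_event_metadata entries, so a minimal
 * userspace reader (assuming a descriptor obtained from fanotify_init())
 * looks roughly like:
 *
 *	#include <sys/fanotify.h>	// struct fanotify_event_metadata, FAN_* macros
 *	#include <unistd.h>
 *
 *	char buf[4096];
 *	ssize_t len = read(fanotify_fd, buf, sizeof(buf));
 *	struct fanotify_event_metadata *md = (struct fanotify_event_metadata *)buf;
 *
 *	while (FAN_EVENT_OK(md, len)) {
 *		// md->fd is FAN_NOFD for queue-overflow events
 *		if (md->fd >= 0)
 *			close(md->fd);
 *		md = FAN_EVENT_NEXT(md, len);
 *	}
 */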
static ssize_t fanotify_write(struct file *file, const char __user *buf, size_t count, loff_t *pos)
{
#ifdef CONFIG_FANOTIFY_ACCESS_PERMISSIONS
	struct fanotify_response response = { .fd = -1, .response = -1 };
	struct fsnotify_group *group;
	int ret;

	group = file->private_data;

	if (count > sizeof(response))
		count = sizeof(response);

	pr_debug("%s: group=%p count=%zu\n", __func__, group, count);

	if (copy_from_user(&response, buf, count))
		return -EFAULT;

	ret = process_access_response(group, &response);
	if (ret < 0)
		count = ret;

	return count;
#else
	return -EINVAL;
#endif
}
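/*
 * Illustrative sketch (not part of this file): a listener created with
 * FAN_CLASS_CONTENT or FAN_CLASS_PRE_CONTENT answers FAN_OPEN_PERM /
 * FAN_ACCESS_PERM events by writing a struct fanotify_response back to the
 * same descriptor; that write is what process_access_response() consumes:
 *
 *	struct fanotify_response resp = {
 *		.fd = md->fd,			// fd taken from the event just read
 *		.response = FAN_ALLOW,		// or FAN_DENY
 *	};
 *	write(fanotify_fd, &resp, sizeof(resp));
 */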
static int fanotify_release(struct inode *ignored, struct file *file)
{
	struct fsnotify_group *group = file->private_data;

#ifdef CONFIG_FANOTIFY_ACCESS_PERMISSIONS
	struct fanotify_response_event *re, *lre;

	mutex_lock(&group->fanotify_data.access_mutex);

	atomic_inc(&group->fanotify_data.bypass_perm);

	list_for_each_entry_safe(re, lre, &group->fanotify_data.access_list, list) {
		pr_debug("%s: found group=%p re=%p event=%p\n", __func__, group,
			 re, re->event);

		list_del_init(&re->list);
		re->event->response = FAN_ALLOW;

		kmem_cache_free(fanotify_response_event_cache, re);
	}
	mutex_unlock(&group->fanotify_data.access_mutex);

	wake_up(&group->fanotify_data.access_waitq);
#endif

	/* matches the fanotify_init->fsnotify_alloc_group */
	fsnotify_destroy_group(group);

	return 0;
}
static long fanotify_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	struct fsnotify_group *group;
	struct fsnotify_event_holder *holder;
	void __user *p;
	int ret = -ENOTTY;
	size_t send_len = 0;

	group = file->private_data;

	p = (void __user *) arg;

	switch (cmd) {
	case FIONREAD:
		mutex_lock(&group->notification_mutex);
		list_for_each_entry(holder, &group->notification_list, event_list)
			send_len += FAN_EVENT_METADATA_LEN;
		mutex_unlock(&group->notification_mutex);
		ret = put_user(send_len, (int __user *) p);
		break;
	}

	return ret;
}
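/*
 * Illustrative sketch (not part of this file): the FIONREAD case above lets
 * userspace size its read buffer before calling read(2):
 *
 *	int avail = 0;
 *	ioctl(fanotify_fd, FIONREAD, &avail);	// bytes currently queued
 */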
static const struct file_operations fanotify_fops = {
	.show_fdinfo	= fanotify_show_fdinfo,
	.poll		= fanotify_poll,
	.read		= fanotify_read,
	.write		= fanotify_write,
	.fasync		= NULL,
	.release	= fanotify_release,
	.unlocked_ioctl	= fanotify_ioctl,
	.compat_ioctl	= fanotify_ioctl,
	.llseek		= noop_llseek,
};

static void fanotify_free_mark(struct fsnotify_mark *fsn_mark)
{
	kmem_cache_free(fanotify_mark_cache, fsn_mark);
}
static int fanotify_find_path(int dfd, const char __user *filename,
			      struct path *path, unsigned int flags)
{
	int ret;

	pr_debug("%s: dfd=%d filename=%p flags=%x\n", __func__,
		 dfd, filename, flags);

	if (filename == NULL) {
		struct fd f = fdget(dfd);

		ret = -EBADF;
		if (!f.file)
			goto out;

		ret = -ENOTDIR;
		if ((flags & FAN_MARK_ONLYDIR) &&
		    !(S_ISDIR(file_inode(f.file)->i_mode))) {
			fdput(f);
			goto out;
		}

		*path = f.file->f_path;
		path_get(path);
		fdput(f);
	} else {
		unsigned int lookup_flags = 0;

		if (!(flags & FAN_MARK_DONT_FOLLOW))
			lookup_flags |= LOOKUP_FOLLOW;
		if (flags & FAN_MARK_ONLYDIR)
			lookup_flags |= LOOKUP_DIRECTORY;

		ret = user_path_at(dfd, filename, lookup_flags, path);
		if (ret)
			goto out;
	}

	/* you can only watch an inode if you have read permissions on it */
	ret = inode_permission(path->dentry->d_inode, MAY_READ);
	if (ret)
		path_put(path);
out:
	return ret;
}
static __u32 fanotify_mark_remove_from_mask(struct fsnotify_mark *fsn_mark,
					    __u32 mask,
					    unsigned int flags,
					    int *destroy)
{
	__u32 oldmask;

	spin_lock(&fsn_mark->lock);
	if (!(flags & FAN_MARK_IGNORED_MASK)) {
		oldmask = fsn_mark->mask;
		fsnotify_set_mark_mask_locked(fsn_mark, (oldmask & ~mask));
	} else {
		oldmask = fsn_mark->ignored_mask;
		fsnotify_set_mark_ignored_mask_locked(fsn_mark, (oldmask & ~mask));
	}
	spin_unlock(&fsn_mark->lock);

	*destroy = !(oldmask & ~mask);

	return mask & oldmask;
}
static int fanotify_remove_vfsmount_mark(struct fsnotify_group *group,
					 struct vfsmount *mnt, __u32 mask,
					 unsigned int flags)
{
	struct fsnotify_mark *fsn_mark = NULL;
	__u32 removed;
	int destroy_mark;

	mutex_lock(&group->mark_mutex);
	fsn_mark = fsnotify_find_vfsmount_mark(group, mnt);
	if (!fsn_mark) {
		mutex_unlock(&group->mark_mutex);
		return -ENOENT;
	}

	removed = fanotify_mark_remove_from_mask(fsn_mark, mask, flags,
						 &destroy_mark);
	if (destroy_mark)
		fsnotify_destroy_mark_locked(fsn_mark, group);
	mutex_unlock(&group->mark_mutex);

	fsnotify_put_mark(fsn_mark);
	if (removed & real_mount(mnt)->mnt_fsnotify_mask)
		fsnotify_recalc_vfsmount_mask(mnt);

	return 0;
}
static int fanotify_remove_inode_mark(struct fsnotify_group *group,
				      struct inode *inode, __u32 mask,
				      unsigned int flags)
{
	struct fsnotify_mark *fsn_mark = NULL;
	__u32 removed;
	int destroy_mark;

	mutex_lock(&group->mark_mutex);
	fsn_mark = fsnotify_find_inode_mark(group, inode);
	if (!fsn_mark) {
		mutex_unlock(&group->mark_mutex);
		return -ENOENT;
	}

	removed = fanotify_mark_remove_from_mask(fsn_mark, mask, flags,
						 &destroy_mark);
	if (destroy_mark)
		fsnotify_destroy_mark_locked(fsn_mark, group);
	mutex_unlock(&group->mark_mutex);

	/* matches the fsnotify_find_inode_mark() */
	fsnotify_put_mark(fsn_mark);
	if (removed & inode->i_fsnotify_mask)
		fsnotify_recalc_inode_mask(inode);

	return 0;
}
static __u32 fanotify_mark_add_to_mask(struct fsnotify_mark *fsn_mark,
				       __u32 mask,
				       unsigned int flags)
{
	__u32 oldmask = -1;

	spin_lock(&fsn_mark->lock);
	if (!(flags & FAN_MARK_IGNORED_MASK)) {
		oldmask = fsn_mark->mask;
		fsnotify_set_mark_mask_locked(fsn_mark, (oldmask | mask));
	} else {
		__u32 tmask = fsn_mark->ignored_mask | mask;
		fsnotify_set_mark_ignored_mask_locked(fsn_mark, tmask);
		if (flags & FAN_MARK_IGNORED_SURV_MODIFY)
			fsn_mark->flags |= FSNOTIFY_MARK_FLAG_IGNORED_SURV_MODIFY;
	}

	if (!(flags & FAN_MARK_ONDIR)) {
		__u32 tmask = fsn_mark->ignored_mask | FAN_ONDIR;
		fsnotify_set_mark_ignored_mask_locked(fsn_mark, tmask);
	}

	spin_unlock(&fsn_mark->lock);

	return mask & ~oldmask;
}
static struct fsnotify_mark *fanotify_add_new_mark(struct fsnotify_group *group,
						   struct inode *inode,
						   struct vfsmount *mnt)
{
	struct fsnotify_mark *mark;
	int ret;

	if (atomic_read(&group->num_marks) > group->fanotify_data.max_marks)
		return ERR_PTR(-ENOSPC);

	mark = kmem_cache_alloc(fanotify_mark_cache, GFP_KERNEL);
	if (!mark)
		return ERR_PTR(-ENOMEM);

	fsnotify_init_mark(mark, fanotify_free_mark);
	ret = fsnotify_add_mark_locked(mark, group, inode, mnt, 0);
	if (ret) {
		fsnotify_put_mark(mark);
		return ERR_PTR(ret);
	}

	return mark;
}
static int fanotify_add_vfsmount_mark(struct fsnotify_group *group,
				      struct vfsmount *mnt, __u32 mask,
				      unsigned int flags)
{
	struct fsnotify_mark *fsn_mark;
	__u32 added;

	mutex_lock(&group->mark_mutex);
	fsn_mark = fsnotify_find_vfsmount_mark(group, mnt);
	if (!fsn_mark) {
		fsn_mark = fanotify_add_new_mark(group, NULL, mnt);
		if (IS_ERR(fsn_mark)) {
			mutex_unlock(&group->mark_mutex);
			return PTR_ERR(fsn_mark);
		}
	}
	added = fanotify_mark_add_to_mask(fsn_mark, mask, flags);
	mutex_unlock(&group->mark_mutex);

	if (added & ~real_mount(mnt)->mnt_fsnotify_mask)
		fsnotify_recalc_vfsmount_mask(mnt);

	fsnotify_put_mark(fsn_mark);
	return 0;
}
static int fanotify_add_inode_mark(struct fsnotify_group *group,
				   struct inode *inode, __u32 mask,
				   unsigned int flags)
{
	struct fsnotify_mark *fsn_mark;
	__u32 added;

	pr_debug("%s: group=%p inode=%p\n", __func__, group, inode);

	/*
	 * If some other task has this inode open for write we should not add
	 * an ignored mark, unless that ignored mark is supposed to survive
	 * modification changes anyway.
	 */
	if ((flags & FAN_MARK_IGNORED_MASK) &&
	    !(flags & FAN_MARK_IGNORED_SURV_MODIFY) &&
	    (atomic_read(&inode->i_writecount) > 0))
		return 0;

	mutex_lock(&group->mark_mutex);
	fsn_mark = fsnotify_find_inode_mark(group, inode);
	if (!fsn_mark) {
		fsn_mark = fanotify_add_new_mark(group, inode, NULL);
		if (IS_ERR(fsn_mark)) {
			mutex_unlock(&group->mark_mutex);
			return PTR_ERR(fsn_mark);
		}
	}
	added = fanotify_mark_add_to_mask(fsn_mark, mask, flags);
	mutex_unlock(&group->mark_mutex);

	if (added & ~inode->i_fsnotify_mask)
		fsnotify_recalc_inode_mask(inode);

	fsnotify_put_mark(fsn_mark);
	return 0;
}
/* fanotify syscalls */
SYSCALL_DEFINE2(fanotify_init, unsigned int, flags, unsigned int, event_f_flags)
{
	struct fsnotify_group *group;
	int f_flags, fd;
	struct user_struct *user;

	pr_debug("%s: flags=%d event_f_flags=%d\n",
		 __func__, flags, event_f_flags);

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (flags & ~FAN_ALL_INIT_FLAGS)
		return -EINVAL;

	user = get_current_user();
	if (atomic_read(&user->fanotify_listeners) > FANOTIFY_DEFAULT_MAX_LISTENERS) {
		free_uid(user);
		return -EMFILE;
	}

	f_flags = O_RDWR | FMODE_NONOTIFY;
	if (flags & FAN_CLOEXEC)
		f_flags |= O_CLOEXEC;
	if (flags & FAN_NONBLOCK)
		f_flags |= O_NONBLOCK;

	/* fsnotify_alloc_group takes a ref.  Dropped in fanotify_release */
	group = fsnotify_alloc_group(&fanotify_fsnotify_ops);
	if (IS_ERR(group)) {
		free_uid(user);
		return PTR_ERR(group);
	}

	group->fanotify_data.user = user;
	atomic_inc(&user->fanotify_listeners);

	group->fanotify_data.f_flags = event_f_flags;
#ifdef CONFIG_FANOTIFY_ACCESS_PERMISSIONS
	mutex_init(&group->fanotify_data.access_mutex);
	init_waitqueue_head(&group->fanotify_data.access_waitq);
	INIT_LIST_HEAD(&group->fanotify_data.access_list);
	atomic_set(&group->fanotify_data.bypass_perm, 0);
#endif
	switch (flags & FAN_ALL_CLASS_BITS) {
	case FAN_CLASS_NOTIF:
		group->priority = FS_PRIO_0;
		break;
	case FAN_CLASS_CONTENT:
		group->priority = FS_PRIO_1;
		break;
	case FAN_CLASS_PRE_CONTENT:
		group->priority = FS_PRIO_2;
		break;
	default:
		fd = -EINVAL;
		goto out_destroy_group;
	}

	if (flags & FAN_UNLIMITED_QUEUE) {
		fd = -EPERM;
		if (!capable(CAP_SYS_ADMIN))
			goto out_destroy_group;
		group->max_events = UINT_MAX;
	} else {
		group->max_events = FANOTIFY_DEFAULT_MAX_EVENTS;
	}

	if (flags & FAN_UNLIMITED_MARKS) {
		fd = -EPERM;
		if (!capable(CAP_SYS_ADMIN))
			goto out_destroy_group;
		group->fanotify_data.max_marks = UINT_MAX;
	} else {
		group->fanotify_data.max_marks = FANOTIFY_DEFAULT_MAX_MARKS;
	}

	fd = anon_inode_getfd("[fanotify]", &fanotify_fops, group, f_flags);
	if (fd < 0)
		goto out_destroy_group;

	return fd;

out_destroy_group:
	fsnotify_destroy_group(group);
	return fd;
}
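/*
 * Illustrative sketch (not part of this file): a typical caller (which must
 * have CAP_SYS_ADMIN, as checked above) creates the group and picks both the
 * notification class and the open flags used for per-event descriptors:
 *
 *	#include <sys/fanotify.h>
 *	#include <fcntl.h>
 *
 *	int fanotify_fd = fanotify_init(FAN_CLOEXEC | FAN_CLASS_CONTENT,
 *					O_RDONLY | O_LARGEFILE);
 *	if (fanotify_fd < 0)
 *		perror("fanotify_init");
 */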
SYSCALL_DEFINE5(fanotify_mark, int, fanotify_fd, unsigned int, flags,
			      __u64, mask, int, dfd,
			      const char  __user *, pathname)
{
	struct inode *inode = NULL;
	struct vfsmount *mnt = NULL;
	struct fsnotify_group *group;
	struct fd f;
	struct path path;
	int ret;

	pr_debug("%s: fanotify_fd=%d flags=%x dfd=%d pathname=%p mask=%llx\n",
		 __func__, fanotify_fd, flags, dfd, pathname, mask);

	/* we only use the lower 32 bits as of right now. */
	if (mask & ((__u64)0xffffffff << 32))
		return -EINVAL;

	if (flags & ~FAN_ALL_MARK_FLAGS)
		return -EINVAL;
	switch (flags & (FAN_MARK_ADD | FAN_MARK_REMOVE | FAN_MARK_FLUSH)) {
	case FAN_MARK_ADD:		/* fallthrough */
	case FAN_MARK_REMOVE:
		if (!mask)
			return -EINVAL;
	case FAN_MARK_FLUSH:
		break;
	default:
		return -EINVAL;
	}

	if (mask & FAN_ONDIR) {
		flags |= FAN_MARK_ONDIR;
		mask &= ~FAN_ONDIR;
	}

#ifdef CONFIG_FANOTIFY_ACCESS_PERMISSIONS
	if (mask & ~(FAN_ALL_EVENTS | FAN_ALL_PERM_EVENTS | FAN_EVENT_ON_CHILD))
#else
	if (mask & ~(FAN_ALL_EVENTS | FAN_EVENT_ON_CHILD))
#endif
		return -EINVAL;

	f = fdget(fanotify_fd);
	if (unlikely(!f.file))
		return -EBADF;

	/* verify that this is indeed an fanotify instance */
	ret = -EINVAL;
	if (unlikely(f.file->f_op != &fanotify_fops))
		goto fput_and_out;
	group = f.file->private_data;

	/*
	 * group->priority == FS_PRIO_0 == FAN_CLASS_NOTIF.  These are not
	 * allowed to set permissions events.
	 */
	ret = -EINVAL;
	if (mask & FAN_ALL_PERM_EVENTS &&
	    group->priority == FS_PRIO_0)
		goto fput_and_out;

	ret = fanotify_find_path(dfd, pathname, &path, flags);
	if (ret)
		goto fput_and_out;

	/* inode held in place by reference to path; group by fget on fd */
	if (!(flags & FAN_MARK_MOUNT))
		inode = path.dentry->d_inode;
	else
		mnt = path.mnt;

	/* create/update an inode mark */
	switch (flags & (FAN_MARK_ADD | FAN_MARK_REMOVE | FAN_MARK_FLUSH)) {
	case FAN_MARK_ADD:
		if (flags & FAN_MARK_MOUNT)
			ret = fanotify_add_vfsmount_mark(group, mnt, mask, flags);
		else
			ret = fanotify_add_inode_mark(group, inode, mask, flags);
		break;
	case FAN_MARK_REMOVE:
		if (flags & FAN_MARK_MOUNT)
			ret = fanotify_remove_vfsmount_mark(group, mnt, mask, flags);
		else
			ret = fanotify_remove_inode_mark(group, inode, mask, flags);
		break;
	case FAN_MARK_FLUSH:
		if (flags & FAN_MARK_MOUNT)
			fsnotify_clear_vfsmount_marks_by_group(group);
		else
			fsnotify_clear_inode_marks_by_group(group);
		break;
	default:
		ret = -EINVAL;
	}

	path_put(&path);
fput_and_out:
	fdput(f);
	return ret;
}
#ifdef CONFIG_COMPAT
COMPAT_SYSCALL_DEFINE6(fanotify_mark,
				int, fanotify_fd, unsigned int, flags,
				__u32, mask0, __u32, mask1, int, dfd,
				const char  __user *, pathname)
{
	return sys_fanotify_mark(fanotify_fd, flags,
#ifdef __BIG_ENDIAN
				((__u64)mask0 << 32) | mask1,
#else
				((__u64)mask1 << 32) | mask0,
#endif
				 dfd, pathname);
}
#endif
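/*
 * Illustrative sketch (not part of this file): marks are then added against a
 * path or a whole mount through fanotify_mark(2); watching opens and
 * close-for-write events below a mount point could look roughly like:
 *
 *	fanotify_mark(fanotify_fd, FAN_MARK_ADD | FAN_MARK_MOUNT,
 *		      FAN_OPEN | FAN_CLOSE_WRITE, AT_FDCWD, "/mnt/data");
 *
 * "/mnt/data" is only a placeholder path; passing FAN_MARK_FLUSH with
 * FAN_MARK_MOUNT instead removes all of this group's mount marks.
 */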
/*
 * fanotify_user_setup - Our initialization function.  Note that we cannot return
 * error because we have compiled-in VFS hooks.  So an (unlikely) failure here
 * must result in panic().
 */
static int __init fanotify_user_setup(void)
{
	fanotify_mark_cache = KMEM_CACHE(fsnotify_mark, SLAB_PANIC);
	fanotify_response_event_cache = KMEM_CACHE(fanotify_response_event,
						   SLAB_PANIC);

	return 0;
}
device_initcall(fanotify_user_setup);