Linux 4.18.10
linux/fpc-iii.git: fs/notify/fanotify/fanotify.c
// SPDX-License-Identifier: GPL-2.0
#include <linux/fanotify.h>
#include <linux/fdtable.h>
#include <linux/fsnotify_backend.h>
#include <linux/init.h>
#include <linux/jiffies.h>
#include <linux/kernel.h> /* UINT_MAX */
#include <linux/mount.h>
#include <linux/sched.h>
#include <linux/sched/user.h>
#include <linux/types.h>
#include <linux/wait.h>
#include <linux/audit.h>

#include "fanotify.h"
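
/*
 * Two queued events can be merged only if they refer to the same inode,
 * come from the same thread group and carry the same path.
 */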
static bool should_merge(struct fsnotify_event *old_fsn,
			 struct fsnotify_event *new_fsn)
{
	struct fanotify_event_info *old, *new;

	pr_debug("%s: old=%p new=%p\n", __func__, old_fsn, new_fsn);
	old = FANOTIFY_E(old_fsn);
	new = FANOTIFY_E(new_fsn);

	if (old_fsn->inode == new_fsn->inode && old->tgid == new->tgid &&
	    old->path.mnt == new->path.mnt &&
	    old->path.dentry == new->path.dentry)
		return true;
	return false;
}

/* and the list better be locked by something too! */
static int fanotify_merge(struct list_head *list, struct fsnotify_event *event)
{
	struct fsnotify_event *test_event;

	pr_debug("%s: list=%p event=%p\n", __func__, list, event);

	/*
	 * Don't merge a permission event with any other event so that we know
	 * the event structure we have created in fanotify_handle_event() is the
	 * one we should check for permission response.
	 */
	if (fanotify_is_perm_event(event->mask))
		return 0;

	list_for_each_entry_reverse(test_event, list, list) {
		if (should_merge(test_event, event)) {
			test_event->mask |= event->mask;
			return 1;
		}
	}

	return 0;
}
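
/*
 * Wait for userspace to respond to a permission event and translate the
 * response: 0 for FAN_ALLOW, -EPERM for FAN_DENY, with an optional audit
 * record when FAN_AUDIT is set.
 */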
static int fanotify_get_response(struct fsnotify_group *group,
				 struct fanotify_perm_event_info *event,
				 struct fsnotify_iter_info *iter_info)
{
	int ret;

	pr_debug("%s: group=%p event=%p\n", __func__, group, event);

	wait_event(group->fanotify_data.access_waitq, event->response);

	/* userspace responded, convert to something usable */
	switch (event->response & ~FAN_AUDIT) {
	case FAN_ALLOW:
		ret = 0;
		break;
	case FAN_DENY:
	default:
		ret = -EPERM;
	}

	/* Check if the response should be audited */
	if (event->response & FAN_AUDIT)
		audit_fanotify(event->response & ~FAN_AUDIT);

	event->response = 0;

	pr_debug("%s: group=%p event=%p about to return ret=%d\n", __func__,
		 group, event, ret);

	return ret;
}
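
/*
 * Decide whether this event should be reported at all: fanotify only sends
 * path events for regular files and directories, and only if the combined
 * mark masks (minus the ignored masks) are interested in the event.
 */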
static bool fanotify_should_send_event(struct fsnotify_iter_info *iter_info,
				       u32 event_mask, const void *data,
				       int data_type)
{
	__u32 marks_mask = 0, marks_ignored_mask = 0;
	const struct path *path = data;
	struct fsnotify_mark *mark;
	int type;

	pr_debug("%s: report_mask=%x mask=%x data=%p data_type=%d\n",
		 __func__, iter_info->report_mask, event_mask, data, data_type);

	/* if we don't have enough info to send an event to userspace say no */
	if (data_type != FSNOTIFY_EVENT_PATH)
		return false;

	/* sorry, fanotify only gives a damn about files and dirs */
	if (!d_is_reg(path->dentry) &&
	    !d_can_lookup(path->dentry))
		return false;

	fsnotify_foreach_obj_type(type) {
		if (!fsnotify_iter_should_report_type(iter_info, type))
			continue;
		mark = iter_info->marks[type];
		/*
		 * if the event is for a child and this inode doesn't care about
		 * events on the child, don't send it!
		 */
		if (type == FSNOTIFY_OBJ_TYPE_INODE &&
		    (event_mask & FS_EVENT_ON_CHILD) &&
		    !(mark->mask & FS_EVENT_ON_CHILD))
			continue;

		marks_mask |= mark->mask;
		marks_ignored_mask |= mark->ignored_mask;
	}

	if (d_is_dir(path->dentry) &&
	    !(marks_mask & FS_ISDIR & ~marks_ignored_mask))
		return false;

	if (event_mask & FAN_ALL_OUTGOING_EVENTS & marks_mask &
				 ~marks_ignored_mask)
		return true;

	return false;
}
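
/*
 * Allocate and initialize an event; permission events use the larger
 * fanotify_perm_event_info so the userspace response can be stored in it.
 * Unlimited-size queues allocate with __GFP_NOFAIL so no event is lost.
 */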
struct fanotify_event_info *fanotify_alloc_event(struct fsnotify_group *group,
						 struct inode *inode, u32 mask,
						 const struct path *path)
{
	struct fanotify_event_info *event;
	gfp_t gfp = GFP_KERNEL;

	/*
	 * For queues with unlimited length lost events are not expected and
	 * can possibly have security implications. Avoid losing events when
	 * memory is short.
	 */
	if (group->max_events == UINT_MAX)
		gfp |= __GFP_NOFAIL;

	if (fanotify_is_perm_event(mask)) {
		struct fanotify_perm_event_info *pevent;

		pevent = kmem_cache_alloc(fanotify_perm_event_cachep, gfp);
		if (!pevent)
			return NULL;
		event = &pevent->fae;
		pevent->response = 0;
		goto init;
	}
	event = kmem_cache_alloc(fanotify_event_cachep, gfp);
	if (!event)
		return NULL;
init: __maybe_unused
	fsnotify_init_event(&event->fse, inode, mask);
	event->tgid = get_pid(task_tgid(current));
	if (path) {
		event->path = *path;
		path_get(&event->path);
	} else {
		event->path.mnt = NULL;
		event->path.dentry = NULL;
	}
	return event;
}
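
/*
 * Main fsnotify callback: filter the event against the marks, allocate and
 * queue it, and for permission events block until userspace has responded.
 */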
static int fanotify_handle_event(struct fsnotify_group *group,
				 struct inode *inode,
				 u32 mask, const void *data, int data_type,
				 const unsigned char *file_name, u32 cookie,
				 struct fsnotify_iter_info *iter_info)
{
	int ret = 0;
	struct fanotify_event_info *event;
	struct fsnotify_event *fsn_event;

	BUILD_BUG_ON(FAN_ACCESS != FS_ACCESS);
	BUILD_BUG_ON(FAN_MODIFY != FS_MODIFY);
	BUILD_BUG_ON(FAN_CLOSE_NOWRITE != FS_CLOSE_NOWRITE);
	BUILD_BUG_ON(FAN_CLOSE_WRITE != FS_CLOSE_WRITE);
	BUILD_BUG_ON(FAN_OPEN != FS_OPEN);
	BUILD_BUG_ON(FAN_EVENT_ON_CHILD != FS_EVENT_ON_CHILD);
	BUILD_BUG_ON(FAN_Q_OVERFLOW != FS_Q_OVERFLOW);
	BUILD_BUG_ON(FAN_OPEN_PERM != FS_OPEN_PERM);
	BUILD_BUG_ON(FAN_ACCESS_PERM != FS_ACCESS_PERM);
	BUILD_BUG_ON(FAN_ONDIR != FS_ISDIR);

	if (!fanotify_should_send_event(iter_info, mask, data, data_type))
		return 0;

	pr_debug("%s: group=%p inode=%p mask=%x\n", __func__, group, inode,
		 mask);

	if (fanotify_is_perm_event(mask)) {
		/*
		 * fsnotify_prepare_user_wait() fails if we race with mark
		 * deletion. Just let the operation pass in that case.
		 */
		if (!fsnotify_prepare_user_wait(iter_info))
			return 0;
	}

	event = fanotify_alloc_event(group, inode, mask, data);
	ret = -ENOMEM;
	if (unlikely(!event)) {
		/*
		 * We don't queue overflow events for permission events as
		 * there the access is denied and so no event is in fact lost.
		 */
		if (!fanotify_is_perm_event(mask))
			fsnotify_queue_overflow(group);
		goto finish;
	}

	fsn_event = &event->fse;
	ret = fsnotify_add_event(group, fsn_event, fanotify_merge);
	if (ret) {
		/* Permission events shouldn't be merged */
		BUG_ON(ret == 1 && mask & FAN_ALL_PERM_EVENTS);
		/* Our event wasn't used in the end. Free it. */
		fsnotify_destroy_event(group, fsn_event);

		ret = 0;
	} else if (fanotify_is_perm_event(mask)) {
		ret = fanotify_get_response(group, FANOTIFY_PE(fsn_event),
					    iter_info);
		fsnotify_destroy_event(group, fsn_event);
	}
finish:
	if (fanotify_is_perm_event(mask))
		fsnotify_finish_user_wait(iter_info);

	return ret;
}
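
/* Drop the per-user listener accounting when a fanotify group goes away. */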
static void fanotify_free_group_priv(struct fsnotify_group *group)
{
	struct user_struct *user;

	user = group->fanotify_data.user;
	atomic_dec(&user->fanotify_listeners);
	free_uid(user);
}
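
/* Release the path and pid references held by an event and free it. */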
static void fanotify_free_event(struct fsnotify_event *fsn_event)
{
	struct fanotify_event_info *event;

	event = FANOTIFY_E(fsn_event);
	path_put(&event->path);
	put_pid(event->tgid);
	if (fanotify_is_perm_event(fsn_event->mask)) {
		kmem_cache_free(fanotify_perm_event_cachep,
				FANOTIFY_PE(fsn_event));
		return;
	}
	kmem_cache_free(fanotify_event_cachep, event);
}
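
/* Return a mark to the fanotify mark cache. */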
static void fanotify_free_mark(struct fsnotify_mark *fsn_mark)
{
	kmem_cache_free(fanotify_mark_cache, fsn_mark);
}
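
/* Callbacks wired into the fsnotify core for fanotify groups. */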
const struct fsnotify_ops fanotify_fsnotify_ops = {
	.handle_event = fanotify_handle_event,
	.free_group_priv = fanotify_free_group_priv,
	.free_event = fanotify_free_event,
	.free_mark = fanotify_free_mark,
};