/*
 * fs/inotify_user.c - inotify support for userspace
 *
 * Authors:
 *	John McCutchan	<ttb@tentacle.dhs.org>
 *	Robert Love	<rml@novell.com>
 *
 * Copyright (C) 2005 John McCutchan
 * Copyright 2006 Hewlett-Packard Development Company, L.P.
 *
 * Copyright (C) 2009 Eric Paris <Red Hat Inc>
 * inotify was largely rewritten to make use of the fsnotify infrastructure
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2, or (at your option) any
 * later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 */

#include <linux/fs.h> /* struct inode */
#include <linux/fsnotify_backend.h>
#include <linux/inotify.h>
#include <linux/path.h> /* struct path */
#include <linux/slab.h> /* kmem_* */
#include <linux/types.h>
#include <linux/sched.h>

#include "inotify.h"
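
/*
 * Queue @event on @group's notification list.  The attached private data
 * carries the watch descriptor of the matching mark so userspace can tell
 * which watch fired.
 */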
static int inotify_handle_event(struct fsnotify_group *group, struct fsnotify_event *event)
{
	struct fsnotify_mark_entry *entry;
	struct inotify_inode_mark_entry *ientry;
	struct inode *to_tell;
	struct inotify_event_private_data *event_priv;
	struct fsnotify_event_private_data *fsn_event_priv;
	int wd, ret;

	to_tell = event->to_tell;

	spin_lock(&to_tell->i_lock);
	entry = fsnotify_find_mark_entry(group, to_tell);
	spin_unlock(&to_tell->i_lock);
	/* race with watch removal?  We already passed should_send */
	if (unlikely(!entry))
		return 0;
	ientry = container_of(entry, struct inotify_inode_mark_entry,
			      fsn_entry);
	wd = ientry->wd;

	event_priv = kmem_cache_alloc(event_priv_cachep, GFP_KERNEL);
	if (unlikely(!event_priv))
		return -ENOMEM;

	fsn_event_priv = &event_priv->fsnotify_event_priv_data;

	fsn_event_priv->group = group;
	event_priv->wd = wd;

	ret = fsnotify_add_notify_event(group, event, fsn_event_priv);
	if (ret) {
		inotify_free_event_priv(fsn_event_priv);
		/* EEXIST says we tail matched, EOVERFLOW isn't something
		 * to report up the stack. */
		if ((ret == -EEXIST) ||
		    (ret == -EOVERFLOW))
			ret = 0;
	}

	/*
	 * If we hold the entry until after the event is on the queue,
	 * IN_IGNORED won't be able to overtake this event in the queue.
	 */
	fsnotify_put_mark(entry);

	return ret;
}
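
/*
 * Called by the fsnotify core when a mark is being destroyed; send the
 * IN_IGNORED event for this watch and drop its descriptor from the idr.
 */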
static void inotify_freeing_mark(struct fsnotify_mark_entry *entry, struct fsnotify_group *group)
{
	inotify_ignored_and_remove_idr(entry, group);
}
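
/*
 * Decide whether this group wants the event at all: only if a mark exists
 * for this group/inode pair and the mark's mask includes the event type.
 */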
static bool inotify_should_send_event(struct fsnotify_group *group, struct inode *inode, __u32 mask)
{
	struct fsnotify_mark_entry *entry;
	bool send;

	spin_lock(&inode->i_lock);
	entry = fsnotify_find_mark_entry(group, inode);
	spin_unlock(&inode->i_lock);
	if (!entry)
		return false;

	mask = (mask & ~FS_EVENT_ON_CHILD);
	send = (entry->mask & mask);

	/* find took a reference */
	fsnotify_put_mark(entry);

	return send;
}

/*
 * This is NEVER supposed to be called.  Inotify marks should either have been
 * removed from the idr when the watch was removed or in the
 * fsnotify_destroy_mark_by_group() call when the inotify instance was being
 * torn down.  This is only called if the idr is about to be freed but there
 * are still marks in it.
 */
static int idr_callback(int id, void *p, void *data)
{
	struct fsnotify_mark_entry *entry;
	struct inotify_inode_mark_entry *ientry;
	static bool warned = false;

	if (warned)
		return 0;
	warned = true;

	entry = p;
	ientry = container_of(entry, struct inotify_inode_mark_entry, fsn_entry);

	WARN(1, "inotify closing but id=%d for entry=%p in group=%p still in "
		"idr.  Probably leaking memory\n", id, p, data);

	/*
	 * I'm taking the liberty of assuming that the mark in question is a
	 * valid address and I'm dereferencing it.  This might help to figure
	 * out why we got here and the panic is no worse than the original
	 * BUG() that was here.
	 */
	printk(KERN_WARNING "entry->group=%p inode=%p wd=%d\n",
	       entry->group, entry->inode, ientry->wd);

	return 0;
}
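
/*
 * Tear down the group's private inotify state: warn about any watch
 * descriptors still sitting in the idr, free the idr itself, and drop the
 * reference on the user who owned this instance.
 */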
static void inotify_free_group_priv(struct fsnotify_group *group)
{
	/* ideally the idr is empty and we won't hit the warning in the callback */
	idr_for_each(&group->inotify_data.idr, idr_callback, group);
	idr_remove_all(&group->inotify_data.idr);
	idr_destroy(&group->inotify_data.idr);
	free_uid(group->inotify_data.user);
}
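
/*
 * Free the per-event private data (the watch descriptor) allocated in
 * inotify_handle_event().
 */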
void inotify_free_event_priv(struct fsnotify_event_private_data *fsn_event_priv)
{
	struct inotify_event_private_data *event_priv;

	event_priv = container_of(fsn_event_priv, struct inotify_event_private_data,
				  fsnotify_event_priv_data);

	kmem_cache_free(event_priv_cachep, event_priv);
}
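
/*
 * fsnotify hooks implemented by the inotify backend; the fsnotify core calls
 * through this table to deliver events to an inotify group and to clean up
 * its marks and private data.
 */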
const struct fsnotify_ops inotify_fsnotify_ops = {
	.handle_event = inotify_handle_event,
	.should_send_event = inotify_should_send_event,
	.free_group_priv = inotify_free_group_priv,
	.free_event_priv = inotify_free_event_priv,
	.freeing_mark = inotify_freeing_mark,
};