Merge branch 'test-ip_mreq_source-android-only' into 'master'
[glib.git] / gio / inotify / inotify-kernel.c
blob9a2e5008ffcd917a2bb846d1ae6513ba5b218adf
1 /*
2 Copyright (C) 2005 John McCutchan
3 Copyright © 2015 Canonical Limited
5 This library is free software; you can redistribute it and/or
6 modify it under the terms of the GNU Lesser General Public
7 License as published by the Free Software Foundation; either
8 version 2.1 of the License, or (at your option) any later version.
10 This library is distributed in the hope that it will be useful,
11 but WITHOUT ANY WARRANTY; without even the implied warranty of
12 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 Lesser General Public License for more details.
15 You should have received a copy of the GNU Lesser General Public License
16 along with this library; if not, see <http://www.gnu.org/licenses/>.
18 Authors:
19 Ryan Lortie <desrt@desrt.ca>
20 John McCutchan <john@johnmccutchan.com>
23 #include "config.h"
25 #include <stdio.h>
26 #include <sys/ioctl.h>
27 #include <unistd.h>
28 #include <errno.h>
29 #include <string.h>
30 #include <glib.h>
31 #include "inotify-kernel.h"
32 #include <sys/inotify.h>
33 #include <glib/glib-unix.h>
35 #include "glib-private.h"
37 /* From inotify(7) */
38 #define MAX_EVENT_SIZE (sizeof(struct inotify_event) + NAME_MAX + 1)
40 /* Amount of time to sleep on receipt of uninteresting events */
41 #define BOREDOM_SLEEP_TIME (100 * G_TIME_SPAN_MILLISECOND)
43 /* Define limits on the maximum amount of time and maximum amount of
44 * interceding events between FROM/TO that can be merged.
46 #define MOVE_PAIR_DELAY (10 * G_TIME_SPAN_MILLISECOND)
47 #define MOVE_PAIR_DISTANCE (100)
49 /* We use the lock from inotify-helper.c
51 * We only have to take it on our read callback.
53 * The rest of locking is taken care of in inotify-helper.c
55 G_LOCK_EXTERN (inotify_lock);
57 static ik_event_t *
58 ik_event_new (struct inotify_event *kevent,
59 gint64 now)
61 ik_event_t *event = g_new0 (ik_event_t, 1);
63 event->wd = kevent->wd;
64 event->mask = kevent->mask;
65 event->cookie = kevent->cookie;
66 event->len = kevent->len;
67 event->timestamp = now;
68 if (event->len)
69 event->name = g_strdup (kevent->name);
70 else
71 event->name = NULL;
73 return event;
76 void
77 _ik_event_free (ik_event_t *event)
79 if (event->pair)
81 event->pair->pair = NULL;
82 _ik_event_free (event->pair);
85 g_free (event->name);
86 g_free (event);
typedef struct
{
  GSource source;              /* must be first: code casts GSource* <-> InotifyKernelSource* */

  GQueue queue;                /* pending ik_event_t's, oldest at the head */
  gpointer fd_tag;             /* tag returned by g_source_add_unix_fd() for 'fd' */
  gint fd;                     /* the inotify file descriptor; negative if init failed */

  GHashTable *unmatched_moves; /* cookie (via GUINT_TO_POINTER) -> unpaired IN_MOVED_FROM event */
  gboolean is_bored;           /* TRUE while fd polling is suspended after uninteresting events */
} InotifyKernelSource;

/* Process-wide singleton, lazily created by _ik_startup(). */
static InotifyKernelSource *inotify_source;
103 static gint64
104 ik_source_get_dispatch_time (InotifyKernelSource *iks)
106 ik_event_t *head;
108 head = g_queue_peek_head (&iks->queue);
110 /* nothing in the queue: not ready */
111 if (!head)
112 return -1;
114 /* if it's not an unpaired move, it is ready now */
115 if (~head->mask & IN_MOVED_FROM || head->pair)
116 return 0;
118 /* if the queue is too long then it's ready now */
119 if (iks->queue.length > MOVE_PAIR_DISTANCE)
120 return 0;
122 /* otherwise, it's ready after the delay */
123 return head->timestamp + MOVE_PAIR_DELAY;
126 static gboolean
127 ik_source_can_dispatch_now (InotifyKernelSource *iks,
128 gint64 now)
130 gint64 dispatch_time;
132 dispatch_time = ik_source_get_dispatch_time (iks);
134 return 0 <= dispatch_time && dispatch_time <= now;
137 static gsize
138 ik_source_read_some_events (InotifyKernelSource *iks,
139 gchar *buffer,
140 gsize buffer_len)
142 gssize result;
143 int errsv;
145 again:
146 result = read (iks->fd, buffer, buffer_len);
147 errsv = errno;
149 if (result < 0)
151 if (errsv == EINTR)
152 goto again;
154 if (errsv == EAGAIN)
155 return 0;
157 g_error ("inotify read(): %s", g_strerror (errsv));
159 else if (result == 0)
160 g_error ("inotify unexpectedly hit eof");
162 return result;
/* Drain the inotify fd, growing to a heap buffer if necessary.
 *
 * Reads once into the caller-supplied @buffer.  If that read came
 * close enough to filling the buffer that another event might not
 * have fit, ask the kernel (FIONREAD) how many more bytes are pending
 * and, if non-zero, copy what we have into a freshly allocated buffer
 * big enough for the rest and read again (exactly once, to avoid
 * looping against a writer).
 *
 * Returns either @buffer itself or a new heap buffer; the caller must
 * compare the result against its own pointer to decide whether to
 * g_free() it.  The total byte count is stored in @length_out.
 */
static gchar *
ik_source_read_all_the_events (InotifyKernelSource *iks,
                               gchar               *buffer,
                               gsize                buffer_len,
                               gsize               *length_out)
{
  gsize n_read;

  n_read = ik_source_read_some_events (iks, buffer, buffer_len);

  /* Check if we might have gotten another event if we had passed in a
   * bigger buffer... */
  if (n_read + MAX_EVENT_SIZE > buffer_len)
    {
      gchar *new_buffer;
      guint n_readable;
      gint result;
      int errsv;

      /* figure out how many more bytes there are to read */
      result = ioctl (iks->fd, FIONREAD, &n_readable);
      errsv = errno;
      if (result != 0)
        g_error ("inotify ioctl(FIONREAD): %s", g_strerror (errsv));

      if (n_readable != 0)
        {
          /* there is in fact more data.  allocate a new buffer, copy
           * the existing data, and then append the remaining.
           */
          new_buffer = g_malloc (n_read + n_readable);
          memcpy (new_buffer, buffer, n_read);
          n_read += ik_source_read_some_events (iks, new_buffer + n_read, n_readable);

          buffer = new_buffer;

          /* There may be new events in the buffer that were added after
           * the FIONREAD was performed, but we can't risk getting into
           * a loop.  We'll get them next time.
           */
        }
    }

  *length_out = n_read;

  return buffer;
}
/* GSource dispatch: drain the inotify fd, pair up rename halves by
 * cookie, and deliver ready events to the user callback under
 * inotify_lock.
 *
 * IN_MOVED_FROM events are parked in iks->unmatched_moves awaiting a
 * matching IN_MOVED_TO; a matched TO is attached as ->pair of the FROM
 * already in the queue instead of being queued itself.  After a run of
 * "uninteresting" activity (nothing matched, callback returned FALSE)
 * the source stops polling the fd and naps (is_bored) to avoid
 * spinning on a flood of events nobody cares about.
 */
static gboolean
ik_source_dispatch (GSource     *source,
                    GSourceFunc  func,
                    gpointer     user_data)
{
  InotifyKernelSource *iks = (InotifyKernelSource *) source;
  gboolean (*user_callback) (ik_event_t *event) = (void *) func;
  gboolean interesting = FALSE;
  gint64 now;

  now = g_source_get_time (source);

  if (iks->is_bored || g_source_query_unix_fd (source, iks->fd_tag))
    {
      gchar stack_buffer[4096];
      gsize buffer_len;
      gchar *buffer;
      gsize offset;

      /* We want to read all of the available events.
       *
       * We need to do it in a finite number of steps so that we don't
       * get caught in a loop of read() with another process
       * continuously adding events each time we drain them.
       *
       * In the normal case we will have only a few events in the queue,
       * so start out by reading into a small stack-allocated buffer.
       * Even though we're on a fresh stack frame, there is no need to
       * pointlessly blow up with the size of the worker thread stack
       * with a huge buffer here.
       *
       * If the result is large enough to cause us to suspect that
       * another event may be pending then we allocate a buffer on the
       * heap that can hold all of the events and read (once!) into that
       * buffer.
       */
      buffer = ik_source_read_all_the_events (iks, stack_buffer, sizeof stack_buffer, &buffer_len);

      offset = 0;

      while (offset < buffer_len)
        {
          struct inotify_event *kevent = (struct inotify_event *) (buffer + offset);
          ik_event_t *event;

          event = ik_event_new (kevent, now);

          /* advance past this kernel record (header plus name bytes) */
          offset += sizeof (struct inotify_event) + event->len;

          if (event->mask & IN_MOVED_TO)
            {
              ik_event_t *pair;

              pair = g_hash_table_lookup (iks->unmatched_moves, GUINT_TO_POINTER (event->cookie));
              if (pair != NULL)
                {
                  g_assert (!pair->pair);

                  /* Matched: attach to the queued FROM event rather
                   * than queueing this TO event separately. */
                  g_hash_table_remove (iks->unmatched_moves, GUINT_TO_POINTER (event->cookie));
                  event->is_second_in_pair = TRUE;
                  event->pair = pair;
                  pair->pair = event;
                  continue;
                }

              interesting = TRUE;
            }

          else if (event->mask & IN_MOVED_FROM)
            {
              gboolean new;

              new = g_hash_table_insert (iks->unmatched_moves, GUINT_TO_POINTER (event->cookie), event);
              if G_UNLIKELY (!new)
                g_warning ("inotify: got IN_MOVED_FROM event with already-pending cookie %#x", event->cookie);

              interesting = TRUE;
            }

          g_queue_push_tail (&iks->queue, event);
        }

      if (buffer_len == 0)
        {
          /* We can end up reading nothing if we arrived here due to a
           * boredom timer but the stream of events stopped meanwhile.
           *
           * In that case, we need to switch back to polling the file
           * descriptor in the usual way.
           */
          g_assert (iks->is_bored);
          interesting = TRUE;
        }

      /* heap buffer only if read_all_the_events had to grow it */
      if (buffer != stack_buffer)
        g_free (buffer);
    }

  while (ik_source_can_dispatch_now (iks, now))
    {
      ik_event_t *event;

      /* callback will free the event */
      event = g_queue_pop_head (&iks->queue);

      /* giving up on an unpaired FROM: drop its pending-cookie entry */
      if (event->mask & IN_MOVED_FROM && !event->pair)
        g_hash_table_remove (iks->unmatched_moves, GUINT_TO_POINTER (event->cookie));

      G_LOCK (inotify_lock);

      interesting |= (* user_callback) (event);

      G_UNLOCK (inotify_lock);
    }

  /* The queue gets blocked iff we have unmatched moves */
  g_assert ((iks->queue.length > 0) == (g_hash_table_size (iks->unmatched_moves) > 0));

  /* Here's where we decide what will wake us up next.
   *
   * If the last event was interesting then we will wake up on the fd or
   * when the timeout is reached on an unpaired move (if any).
   *
   * If the last event was uninteresting then we will wake up after the
   * shorter of the boredom sleep or any timeout for a unpaired move.
   */
  if (interesting)
    {
      if (iks->is_bored)
        {
          /* resume polling the fd */
          g_source_modify_unix_fd (source, iks->fd_tag, G_IO_IN);
          iks->is_bored = FALSE;
        }

      g_source_set_ready_time (source, ik_source_get_dispatch_time (iks));
    }
  else
    {
      /* NOTE(review): these are deliberately unsigned, it seems — a -1
       * ("never") from ik_source_get_dispatch_time() wraps to
       * G_MAXUINT64 so MIN() falls through to the boredom time.
       * Confirm against upstream before "fixing" the types. */
      guint64 dispatch_time = ik_source_get_dispatch_time (iks);
      guint64 boredom_time = now + BOREDOM_SLEEP_TIME;

      if (!iks->is_bored)
        {
          /* stop polling the fd while bored; the ready-time wakes us */
          g_source_modify_unix_fd (source, iks->fd_tag, 0);
          iks->is_bored = TRUE;
        }

      g_source_set_ready_time (source, MIN (dispatch_time, boredom_time));
    }

  return TRUE;
}
/* Create the singleton inotify GSource and attach it to the GLib
 * worker context.
 *
 * @callback is invoked (via ik_source_dispatch) once per delivered
 * ik_event_t and takes ownership of the event.  Note that a failed
 * inotify init leaves iks->fd negative and the source attached but
 * without an fd to poll; _ik_startup() reports this to the caller.
 */
static InotifyKernelSource *
ik_source_new (gboolean (* callback) (ik_event_t *event))
{
  static GSourceFuncs source_funcs = {
    NULL, NULL,          /* no prepare/check: we drive readiness via unix-fd + ready-time */
    ik_source_dispatch
    /* should have a finalize, but it will never happen */
  };
  InotifyKernelSource *iks;
  GSource *source;

  source = g_source_new (&source_funcs, sizeof (InotifyKernelSource));
  iks = (InotifyKernelSource *) source;

  g_source_set_name (source, "inotify kernel source");

  /* keys are bare cookie integers, so no hash/equal funcs needed */
  iks->unmatched_moves = g_hash_table_new (NULL, NULL);
  iks->fd = inotify_init1 (IN_CLOEXEC);

  /* fall back for kernels without inotify_init1() */
  if (iks->fd < 0)
    iks->fd = inotify_init ();

  if (iks->fd >= 0)
    {
      GError *error = NULL;

      g_unix_set_fd_nonblocking (iks->fd, TRUE, &error);
      g_assert_no_error (error);

      iks->fd_tag = g_source_add_unix_fd (source, iks->fd, G_IO_IN);
    }

  g_source_set_callback (source, (GSourceFunc) callback, NULL, NULL);

  g_source_attach (source, GLIB_PRIVATE_CALL (g_get_worker_context) ());

  return iks;
}
/* Initialise the inotify backend exactly once (thread-safe via
 * g_once_init_*) with @cb as the event consumer.
 *
 * Returns TRUE if the inotify fd was successfully opened.  Note that
 * @cb from the first caller wins; later calls only report readiness.
 */
gboolean
_ik_startup (gboolean (*cb)(ik_event_t *event))
{
  if (g_once_init_enter (&inotify_source))
    g_once_init_leave (&inotify_source, ik_source_new (cb));

  return inotify_source->fd >= 0;
}
415 gint32
416 _ik_watch (const char *path,
417 guint32 mask,
418 int *err)
420 gint32 wd = -1;
422 g_assert (path != NULL);
423 g_assert (inotify_source && inotify_source->fd >= 0);
425 wd = inotify_add_watch (inotify_source->fd, path, mask);
427 if (wd < 0)
429 int e = errno;
430 /* FIXME: debug msg failed to add watch */
431 if (err)
432 *err = e;
433 return wd;
436 g_assert (wd >= 0);
437 return wd;
/* Remove watch descriptor @wd from the inotify instance.  Returns 0 on
 * success, -1 if the kernel refused (errno is not reported).  @path is
 * accepted for symmetry with _ik_watch() but is unused here. */
_ik_ignore (const char *path,
            gint32      wd)
{
  g_assert (wd >= 0);
  g_assert (inotify_source && inotify_source->fd >= 0);

  if (inotify_rm_watch (inotify_source->fd, wd) < 0)
    {
      /* int e = errno; */
      /* failed to rm watch */
      return -1;
    }

  return 0;
}