/*
   Copyright (C) 2005 John McCutchan
   Copyright © 2015 Canonical Limited

   This library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Library General Public
   License as published by the Free Software Foundation; either
   version 2 of the License, or (at your option) any later version.

   This library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
   Library General Public License for more details.

   You should have received a copy of the GNU Library General Public License
   along with this library; if not, see <http://www.gnu.org/licenses/>.

   Authors:
     Ryan Lortie <desrt@desrt.ca>
     John McCutchan <john@johnmccutchan.com>
*/

#include "config.h"

#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <errno.h>
#include <string.h>
#include <glib.h>
#include "inotify-kernel.h"
#include <sys/inotify.h>
#include <glib/glib-unix.h>

#include "glib-private.h"

/* From inotify(7) */
#define MAX_EVENT_SIZE       (sizeof(struct inotify_event) + NAME_MAX + 1)

/* Amount of time to sleep on receipt of uninteresting events */
#define BOREDOM_SLEEP_TIME   (100 * G_TIME_SPAN_MILLISECOND)

/* Define limits on the maximum amount of time and maximum number of
 * interceding events between FROM/TO that can be merged.
 */
#define MOVE_PAIR_DELAY      (10 * G_TIME_SPAN_MILLISECOND)
#define MOVE_PAIR_DISTANCE   (100)
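
/* A rename within a watched directory is reported by the kernel as an
 * IN_MOVED_FROM event followed by an IN_MOVED_TO event carrying the same
 * cookie.  We hold back an unpaired IN_MOVED_FROM for at most
 * MOVE_PAIR_DELAY, or until MOVE_PAIR_DISTANCE other events have queued
 * up behind it, in the hope of merging it with its IN_MOVED_TO partner.
 */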

/* We use the lock from inotify-helper.c
 *
 * We only have to take it on our read callback.
 *
 * The rest of locking is taken care of in inotify-helper.c
 */
G_LOCK_EXTERN (inotify_lock);
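
/* Copy a raw struct inotify_event out of the kernel buffer into a
 * heap-allocated ik_event_t, stamping it with the caller-supplied
 * timestamp so that unpaired moves can later be timed out.
 */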
static ik_event_t *
ik_event_new (struct inotify_event *kevent,
              gint64                now)
{
  ik_event_t *event = g_new0 (ik_event_t, 1);

  event->wd = kevent->wd;
  event->mask = kevent->mask;
  event->cookie = kevent->cookie;
  event->len = kevent->len;
  event->timestamp = now;
  if (event->len)
    event->name = g_strdup (kevent->name);
  else
    event->name = NULL;

  return event;
}
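
/* Free an event.  If it has been paired with its rename partner, the
 * partner is unlinked and freed along with it.
 */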
void
_ik_event_free (ik_event_t *event)
{
  if (event->pair)
    {
      event->pair->pair = NULL;
      _ik_event_free (event->pair);
    }

  g_free (event->name);
  g_free (event);
}
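
/* A custom GSource that owns the inotify file descriptor.
 *
 * Events read from the kernel are parked in 'queue' until they are ready
 * to be dispatched.  'unmatched_moves' maps rename cookies to queued
 * IN_MOVED_FROM events that are still waiting for their IN_MOVED_TO
 * partner.  'is_bored' is set while we are rate-limiting an uninteresting
 * stream of events (see BOREDOM_SLEEP_TIME).
 */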
typedef struct
{
  GSource     source;

  GQueue      queue;
  gpointer    fd_tag;
  gint        fd;

  GHashTable *unmatched_moves;
  gboolean    is_bored;
} InotifyKernelSource;

static InotifyKernelSource *inotify_source;
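
/* Returns the time at which the head of the queue becomes dispatchable:
 * -1 if the queue is empty, 0 if the head is ready now, or the deadline
 * by which an unpaired IN_MOVED_FROM must be flushed anyway.
 */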
static gint64
ik_source_get_dispatch_time (InotifyKernelSource *iks)
{
  ik_event_t *head;

  head = g_queue_peek_head (&iks->queue);

  /* nothing in the queue: not ready */
  if (!head)
    return -1;

  /* if it's not an unpaired move, it is ready now */
  if (~head->mask & IN_MOVED_FROM || head->pair)
    return 0;

  /* if the queue is too long then it's ready now */
  if (iks->queue.length > MOVE_PAIR_DISTANCE)
    return 0;

  /* otherwise, it's ready after the delay */
  return head->timestamp + MOVE_PAIR_DELAY;
}

static gboolean
ik_source_can_dispatch_now (InotifyKernelSource *iks,
                            gint64               now)
{
  gint64 dispatch_time;

  dispatch_time = ik_source_get_dispatch_time (iks);

  return 0 <= dispatch_time && dispatch_time <= now;
}
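
/* Perform a single read() from the inotify fd into 'buffer', retrying on
 * EINTR.  Returns the number of bytes read, or 0 if the fd had nothing to
 * offer (EAGAIN); any other failure is fatal.
 */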
static gsize
ik_source_read_some_events (InotifyKernelSource *iks,
                            gchar               *buffer,
                            gsize                buffer_len)
{
  gssize result;

again:
  result = read (iks->fd, buffer, buffer_len);

  if (result < 0)
    {
      if (errno == EINTR)
        goto again;

      if (errno == EAGAIN)
        return 0;

      g_error ("inotify read(): %s", g_strerror (errno));
    }
  else if (result == 0)
    g_error ("inotify unexpectedly hit eof");

  return result;
}
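
/* Drain the fd as completely as we can in a bounded number of reads.
 * The result is either the caller-supplied 'buffer' or a larger
 * heap-allocated one (which the caller must detect and free), with
 * *length_out set to the total number of bytes read.
 */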
static gchar *
ik_source_read_all_the_events (InotifyKernelSource *iks,
                               gchar               *buffer,
                               gsize                buffer_len,
                               gsize               *length_out)
{
  gsize n_read;

  n_read = ik_source_read_some_events (iks, buffer, buffer_len);

  /* Check if we might have gotten another event if we had passed in a
   * bigger buffer...
   */
  if (n_read + MAX_EVENT_SIZE > buffer_len)
    {
      gchar *new_buffer;
      guint n_readable;
      gint result;

      /* figure out how many more bytes there are to read */
      result = ioctl (iks->fd, FIONREAD, &n_readable);
      if (result != 0)
        g_error ("inotify ioctl(FIONREAD): %s", g_strerror (errno));

      if (n_readable != 0)
        {
          /* there is in fact more data.  allocate a new buffer, copy
           * the existing data, and then append the remaining.
           */
          new_buffer = g_malloc (n_read + n_readable);
          memcpy (new_buffer, buffer, n_read);
          n_read += ik_source_read_some_events (iks, new_buffer + n_read, n_readable);

          buffer = new_buffer;

          /* There may be new events in the buffer that were added after
           * the FIONREAD was performed, but we can't risk getting into
           * a loop.  We'll get them next time.
           */
        }
    }

  *length_out = n_read;

  return buffer;
}
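
/* The dispatch function: read everything the kernel has for us, pair up
 * renames by cookie, hand the events that are ready to the user callback
 * (under inotify_lock), and then decide what should wake us up next.
 */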
static gboolean
ik_source_dispatch (GSource     *source,
                    GSourceFunc  func,
                    gpointer     user_data)
{
  InotifyKernelSource *iks = (InotifyKernelSource *) source;
  gboolean (*user_callback) (ik_event_t *event) = (void *) func;
  gboolean interesting = FALSE;
  gint64 now;

  now = g_source_get_time (source);

  if (iks->is_bored || g_source_query_unix_fd (source, iks->fd_tag))
    {
      gchar stack_buffer[4096];
      gsize buffer_len;
      gchar *buffer;
      gsize offset;

      /* We want to read all of the available events.
       *
       * We need to do it in a finite number of steps so that we don't
       * get caught in a loop of read() with another process
       * continuously adding events each time we drain them.
       *
       * In the normal case we will have only a few events in the queue,
       * so start out by reading into a small stack-allocated buffer.
       * Even though we're on a fresh stack frame, there is no need to
       * pointlessly blow up the size of the worker thread stack with a
       * huge buffer here.
       *
       * If the result is large enough to cause us to suspect that
       * another event may be pending then we allocate a buffer on the
       * heap that can hold all of the events and read (once!) into that
       * buffer.
       */
      buffer = ik_source_read_all_the_events (iks, stack_buffer, sizeof stack_buffer, &buffer_len);

      offset = 0;

      while (offset < buffer_len)
        {
          struct inotify_event *kevent = (struct inotify_event *) (buffer + offset);
          ik_event_t *event;

          event = ik_event_new (kevent, now);

          offset += sizeof (struct inotify_event) + event->len;

          if (event->mask & IN_MOVED_TO)
            {
              ik_event_t *pair;

              pair = g_hash_table_lookup (iks->unmatched_moves, GUINT_TO_POINTER (event->cookie));
              if (pair != NULL)
                {
                  g_assert (!pair->pair);

                  g_hash_table_remove (iks->unmatched_moves, GUINT_TO_POINTER (event->cookie));
                  event->is_second_in_pair = TRUE;
                  event->pair = pair;
                  pair->pair = event;
                  continue;
                }

              interesting = TRUE;
            }

          else if (event->mask & IN_MOVED_FROM)
            {
              gboolean new;

              new = g_hash_table_insert (iks->unmatched_moves, GUINT_TO_POINTER (event->cookie), event);
              if G_UNLIKELY (!new)
                g_warning ("inotify: got IN_MOVED_FROM event with already-pending cookie %#x", event->cookie);

              interesting = TRUE;
            }

          g_queue_push_tail (&iks->queue, event);
        }

      if (buffer_len == 0)
        {
          /* We can end up reading nothing if we arrived here due to a
           * boredom timer but the stream of events stopped meanwhile.
           *
           * In that case, we need to switch back to polling the file
           * descriptor in the usual way.
           */
          g_assert (iks->is_bored);
          interesting = TRUE;
        }

      if (buffer != stack_buffer)
        g_free (buffer);
    }

  while (ik_source_can_dispatch_now (iks, now))
    {
      ik_event_t *event;

      /* callback will free the event */
      event = g_queue_pop_head (&iks->queue);

      if (event->mask & IN_MOVED_FROM && !event->pair)
        g_hash_table_remove (iks->unmatched_moves, GUINT_TO_POINTER (event->cookie));

      G_LOCK (inotify_lock);

      interesting |= (* user_callback) (event);

      G_UNLOCK (inotify_lock);
    }

  /* The queue gets blocked iff we have unmatched moves */
  g_assert ((iks->queue.length > 0) == (g_hash_table_size (iks->unmatched_moves) > 0));

  /* Here's where we decide what will wake us up next.
   *
   * If the last event was interesting then we will wake up on the fd or
   * when the timeout is reached on an unpaired move (if any).
   *
   * If the last event was uninteresting then we will wake up after the
   * shorter of the boredom sleep or any timeout for an unpaired move.
   */
  if (interesting)
    {
      if (iks->is_bored)
        {
          g_source_modify_unix_fd (source, iks->fd_tag, G_IO_IN);
          iks->is_bored = FALSE;
        }

      g_source_set_ready_time (source, ik_source_get_dispatch_time (iks));
    }
  else
    {
      guint64 dispatch_time = ik_source_get_dispatch_time (iks);
      guint64 boredom_time = now + BOREDOM_SLEEP_TIME;

      if (!iks->is_bored)
        {
          g_source_modify_unix_fd (source, iks->fd_tag, 0);
          iks->is_bored = TRUE;
        }

      g_source_set_ready_time (source, MIN (dispatch_time, boredom_time));
    }

  return TRUE;
}
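
/* Create the custom source: open the inotify fd (preferring
 * inotify_init1 (IN_CLOEXEC), with a fallback to plain inotify_init ()),
 * make it non-blocking, and attach the source to GLib's private worker
 * context.
 */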
static InotifyKernelSource *
ik_source_new (gboolean (* callback) (ik_event_t *event))
{
  static GSourceFuncs source_funcs = {
    NULL, NULL,
    ik_source_dispatch
    /* should have a finalize, but it will never happen */
  };
  InotifyKernelSource *iks;
  GSource *source;

  source = g_source_new (&source_funcs, sizeof (InotifyKernelSource));
  iks = (InotifyKernelSource *) source;

  g_source_set_name (source, "inotify kernel source");

  iks->unmatched_moves = g_hash_table_new (NULL, NULL);
  iks->fd = inotify_init1 (IN_CLOEXEC);

  if (iks->fd < 0)
    iks->fd = inotify_init ();

  if (iks->fd >= 0)
    {
      GError *error = NULL;

      g_unix_set_fd_nonblocking (iks->fd, TRUE, &error);
      g_assert_no_error (error);

      iks->fd_tag = g_source_add_unix_fd (source, iks->fd, G_IO_IN);
    }

  g_source_set_callback (source, (GSourceFunc) callback, NULL, NULL);

  g_source_attach (source, GLIB_PRIVATE_CALL (g_get_worker_context) ());

  return iks;
}
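
/* Public entry point: create the kernel source exactly once and report
 * whether an inotify descriptor could actually be opened.
 */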
gboolean
_ik_startup (gboolean (*cb)(ik_event_t *event))
{
  if (g_once_init_enter (&inotify_source))
    g_once_init_leave (&inotify_source, ik_source_new (cb));

  return inotify_source->fd >= 0;
}
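
/* Add an inotify watch on 'path'.  Returns the watch descriptor, or a
 * negative value on failure, in which case the errno is stored in *err
 * (if provided).
 */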
gint32
_ik_watch (const char *path,
           guint32     mask,
           int        *err)
{
  gint32 wd = -1;

  g_assert (path != NULL);
  g_assert (inotify_source && inotify_source->fd >= 0);

  wd = inotify_add_watch (inotify_source->fd, path, mask);

  if (wd < 0)
    {
      int e = errno;
      /* FIXME: debug msg failed to add watch */
      if (err)
        *err = e;
      return wd;
    }

  g_assert (wd >= 0);
  return wd;
}
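
/* Remove a watch previously added with _ik_watch().  Returns 0 on
 * success or -1 if the kernel refused to remove it.
 */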
int
_ik_ignore (const char *path,
            gint32      wd)
{
  g_assert (wd >= 0);
  g_assert (inotify_source && inotify_source->fd >= 0);

  if (inotify_rm_watch (inotify_source->fd, wd) < 0)
    {
      /* int e = errno; */
      /* failed to rm watch */
      return -1;
    }

  return 0;
}
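
/* A rough, hypothetical usage sketch of this module's entry points (the
 * real consumer is inotify-helper.c; the names and paths below are
 * illustrative only):
 *
 *   static gboolean
 *   handle_event (ik_event_t *event)
 *   {
 *     gboolean interesting = TRUE;
 *     ...
 *     _ik_event_free (event);   // the callback is expected to free the event
 *     return interesting;
 *   }
 *
 *   if (_ik_startup (handle_event))
 *     {
 *       int err = 0;
 *       gint32 wd = _ik_watch ("/some/dir", IN_ALL_EVENTS, &err);
 *
 *       if (wd >= 0)
 *         ...
 *       _ik_ignore ("/some/dir", wd);
 *     }
 */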