/* inotify-kernel.c: kernel-facing half of the GIO inotify backend.

   Copyright (C) 2005 John McCutchan

   The Gnome Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Library General Public License as
   published by the Free Software Foundation; either version 2 of the
   License, or (at your option) any later version.

   The Gnome Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
   Library General Public License for more details.

   You should have received a copy of the GNU Library General Public
   License along with the Gnome Library; see the file COPYING.LIB. If not,
   write to the Free Software Foundation, Inc., 59 Temple Place - Suite 330,
   Boston, MA 02111-1307, USA.

   Authors:
       John McCutchan <john@johnmccutchan.com>
*/
26 #include <sys/ioctl.h>
31 #include "inotify-kernel.h"
32 #include <sys/inotify.h>
34 /* Timings for pairing MOVED_TO / MOVED_FROM events */
35 #define PROCESS_EVENTS_TIME 1000 /* 1000 milliseconds (1 hz) */
36 #define DEFAULT_HOLD_UNTIL_TIME 0 /* 0 millisecond */
37 #define MOVE_HOLD_UNTIL_TIME 500 /* 500 microseconds or 0.5 milliseconds */
39 static int inotify_instance_fd
= -1;
40 static GQueue
*events_to_process
= NULL
;
41 static GQueue
*event_queue
= NULL
;
42 static GHashTable
* cookie_hash
= NULL
;
43 static GIOChannel
*inotify_read_ioc
;
44 static GPollFD ik_poll_fd
;
45 static gboolean ik_poll_fd_enabled
= TRUE
;
46 static void (*user_cb
)(ik_event_t
*event
);
48 static gboolean
ik_read_callback (gpointer user_data
);
49 static gboolean
ik_process_eq_callback (gpointer user_data
);
51 static guint32 ik_move_matches
= 0;
52 static guint32 ik_move_misses
= 0;
54 static gboolean process_eq_running
= FALSE
;
56 /* We use the lock from inotify-helper.c
58 * There are two places that we take this lock
60 * 1) In ik_read_callback
62 * 2) ik_process_eq_callback.
65 * The rest of locking is taken care of in inotify-helper.c
67 G_LOCK_EXTERN (inotify_lock
);
69 typedef struct ik_event_internal
{
74 struct ik_event_internal
*pair
;
75 } ik_event_internal_t
;
77 /* In order to perform non-sleeping inotify event chunking we need
81 ik_source_prepare (GSource
*source
,
88 ik_source_timeout (gpointer data
)
90 GSource
*source
= (GSource
*)data
;
92 /* Re-active the PollFD */
93 g_source_add_poll (source
, &ik_poll_fd
);
94 g_source_unref (source
);
95 ik_poll_fd_enabled
= TRUE
;
/* Tuning knobs for the "hold off reading to batch events" strategy in
 * ik_source_check(). */
#define MAX_PENDING_COUNT 2
#define PENDING_THRESHOLD(qsize) ((qsize) >> 1)
#define PENDING_MARGINAL_COST(p) ((unsigned int)(1 << (p)))
#define MAX_QUEUED_EVENTS 2048
/* Parenthesized so the expansion is safe in any expression context. */
#define AVERAGE_EVENT_SIZE (sizeof (struct inotify_event) + 16)
#define TIMEOUT_MILLISECONDS 10
108 ik_source_check (GSource
*source
)
110 static int prev_pending
= 0, pending_count
= 0;
112 /* We already disabled the PollFD or
113 * nothing to be read from inotify */
114 if (!ik_poll_fd_enabled
|| !(ik_poll_fd
.revents
& G_IO_IN
))
117 if (pending_count
< MAX_PENDING_COUNT
)
119 unsigned int pending
;
121 if (ioctl (inotify_instance_fd
, FIONREAD
, &pending
) == -1)
124 pending
/= AVERAGE_EVENT_SIZE
;
126 /* Don't wait if the number of pending events is too close
127 * to the maximum queue size.
129 if (pending
> PENDING_THRESHOLD (MAX_QUEUED_EVENTS
))
132 /* With each successive iteration, the minimum rate for
133 * further sleep doubles.
135 if (pending
-prev_pending
< PENDING_MARGINAL_COST (pending_count
))
138 prev_pending
= pending
;
141 /* We are going to wait to read the events: */
143 /* Remove the PollFD from the source */
144 g_source_remove_poll (source
, &ik_poll_fd
);
145 /* To avoid threading issues we need to flag that we've done that */
146 ik_poll_fd_enabled
= FALSE
;
147 /* Set a timeout to re-add the PollFD to the source */
148 g_source_ref (source
);
149 g_timeout_add (TIMEOUT_MILLISECONDS
, ik_source_timeout
, source
);
155 /* We are ready to read events from inotify */
164 ik_source_dispatch (GSource
*source
,
165 GSourceFunc callback
,
169 return callback (user_data
);
173 static GSourceFuncs ik_source_funcs
=
181 gboolean
_ik_startup (void (*cb
)(ik_event_t
*event
))
183 static gboolean initialized
= FALSE
;
187 /* Ignore multi-calls */
189 return inotify_instance_fd
>= 0;
193 #ifdef HAVE_INOTIFY_INIT1
194 inotify_instance_fd
= inotify_init1 (IN_CLOEXEC
);
196 inotify_instance_fd
= -1;
198 if (inotify_instance_fd
< 0)
199 inotify_instance_fd
= inotify_init ();
201 if (inotify_instance_fd
< 0)
204 inotify_read_ioc
= g_io_channel_unix_new (inotify_instance_fd
);
205 ik_poll_fd
.fd
= inotify_instance_fd
;
206 ik_poll_fd
.events
= G_IO_IN
| G_IO_HUP
| G_IO_ERR
;
207 g_io_channel_set_encoding (inotify_read_ioc
, NULL
, NULL
);
208 g_io_channel_set_flags (inotify_read_ioc
, G_IO_FLAG_NONBLOCK
, NULL
);
210 source
= g_source_new (&ik_source_funcs
, sizeof (GSource
));
211 g_source_set_name (source
, "GIO Inotify");
212 g_source_add_poll (source
, &ik_poll_fd
);
213 g_source_set_callback (source
, ik_read_callback
, NULL
, NULL
);
214 g_source_attach (source
, NULL
);
215 g_source_unref (source
);
217 cookie_hash
= g_hash_table_new (g_direct_hash
, g_direct_equal
);
218 event_queue
= g_queue_new ();
219 events_to_process
= g_queue_new ();
224 static ik_event_internal_t
*
225 ik_event_internal_new (ik_event_t
*event
)
227 ik_event_internal_t
*internal_event
= g_new0 (ik_event_internal_t
, 1);
232 g_get_current_time (&tv
);
233 g_time_val_add (&tv
, DEFAULT_HOLD_UNTIL_TIME
);
234 internal_event
->event
= event
;
235 internal_event
->hold_until
= tv
;
237 return internal_event
;
241 ik_event_new (char *buffer
)
243 struct inotify_event
*kevent
= (struct inotify_event
*)buffer
;
244 ik_event_t
*event
= g_new0 (ik_event_t
, 1);
248 event
->wd
= kevent
->wd
;
249 event
->mask
= kevent
->mask
;
250 event
->cookie
= kevent
->cookie
;
251 event
->len
= kevent
->len
;
253 event
->name
= g_strdup (kevent
->name
);
255 event
->name
= g_strdup ("");
261 _ik_event_new_dummy (const char *name
,
265 ik_event_t
*event
= g_new0 (ik_event_t
, 1);
270 event
->name
= g_strdup (name
);
272 event
->name
= g_strdup("");
274 event
->len
= strlen (event
->name
);
280 _ik_event_free (ik_event_t
*event
)
283 _ik_event_free (event
->pair
);
284 g_free (event
->name
);
289 _ik_watch (const char *path
,
295 g_assert (path
!= NULL
);
296 g_assert (inotify_instance_fd
>= 0);
298 wd
= inotify_add_watch (inotify_instance_fd
, path
, mask
);
303 /* FIXME: debug msg failed to add watch */
314 _ik_ignore (const char *path
,
318 g_assert (inotify_instance_fd
>= 0);
320 if (inotify_rm_watch (inotify_instance_fd
, wd
) < 0)
323 /* failed to rm watch */
331 _ik_move_stats (guint32
*matches
,
335 *matches
= ik_move_matches
;
338 *misses
= ik_move_misses
;
342 _ik_mask_to_string (guint32 mask
)
344 gboolean is_dir
= mask
& IN_ISDIR
;
352 return "ACCESS (dir)";
354 return "MODIFY (dir)";
356 return "ATTRIB (dir)";
358 return "CLOSE_WRITE (dir)";
359 case IN_CLOSE_NOWRITE
:
360 return "CLOSE_NOWRITE (dir)";
364 return "MOVED_FROM (dir)";
366 return "MOVED_TO (dir)";
368 return "DELETE (dir)";
370 return "CREATE (dir)";
372 return "DELETE_SELF (dir)";
374 return "UNMOUNT (dir)";
376 return "Q_OVERFLOW (dir)";
378 return "IGNORED (dir)";
380 return "UNKNOWN_EVENT (dir)";
394 return "CLOSE_WRITE";
395 case IN_CLOSE_NOWRITE
:
396 return "CLOSE_NOWRITE";
408 return "DELETE_SELF";
416 return "UNKNOWN_EVENT";
423 ik_read_events (gsize
*buffer_size_out
,
426 static gchar
*buffer
= NULL
;
427 static gsize buffer_size
;
429 /* Initialize the buffer on our first call */
432 buffer_size
= AVERAGE_EVENT_SIZE
;
433 buffer_size
*= MAX_QUEUED_EVENTS
;
434 buffer
= g_malloc (buffer_size
);
437 *buffer_size_out
= 0;
440 memset (buffer
, 0, buffer_size
);
442 if (g_io_channel_read_chars (inotify_read_ioc
, (char *)buffer
, buffer_size
, buffer_size_out
, NULL
) != G_IO_STATUS_NORMAL
) {
445 *buffer_out
= buffer
;
449 ik_read_callback (gpointer user_data
)
452 gsize buffer_size
, buffer_i
, events
;
454 G_LOCK (inotify_lock
);
455 ik_read_events (&buffer_size
, &buffer
);
459 while (buffer_i
< buffer_size
)
461 struct inotify_event
*event
;
463 event
= (struct inotify_event
*)&buffer
[buffer_i
];
464 event_size
= sizeof(struct inotify_event
) + event
->len
;
465 g_queue_push_tail (events_to_process
, ik_event_internal_new (ik_event_new (&buffer
[buffer_i
])));
466 buffer_i
+= event_size
;
470 /* If the event process callback is off, turn it back on */
471 if (!process_eq_running
&& events
)
473 process_eq_running
= TRUE
;
474 g_timeout_add (PROCESS_EVENTS_TIME
, ik_process_eq_callback
, NULL
);
477 G_UNLOCK (inotify_lock
);
483 g_timeval_lt (GTimeVal
*val1
,
486 if (val1
->tv_sec
< val2
->tv_sec
)
489 if (val1
->tv_sec
> val2
->tv_sec
)
492 /* val1->tv_sec == val2->tv_sec */
493 if (val1
->tv_usec
< val2
->tv_usec
)
500 g_timeval_eq (GTimeVal
*val1
,
503 return (val1
->tv_sec
== val2
->tv_sec
) && (val1
->tv_usec
== val2
->tv_usec
);
507 ik_pair_events (ik_event_internal_t
*event1
,
508 ik_event_internal_t
*event2
)
510 g_assert (event1
&& event2
);
511 /* We should only be pairing events that have the same cookie */
512 g_assert (event1
->event
->cookie
== event2
->event
->cookie
);
513 /* We shouldn't pair an event that already is paired */
514 g_assert (event1
->pair
== NULL
&& event2
->pair
== NULL
);
516 /* Pair the internal structures and the ik_event_t structures */
517 event1
->pair
= event2
;
518 event1
->event
->pair
= event2
->event
;
519 event2
->event
->is_second_in_pair
= TRUE
;
521 if (g_timeval_lt (&event1
->hold_until
, &event2
->hold_until
))
522 event1
->hold_until
= event2
->hold_until
;
524 event2
->hold_until
= event1
->hold_until
;
528 ik_event_add_microseconds (ik_event_internal_t
*event
,
532 g_time_val_add (&event
->hold_until
, ms
);
536 ik_event_ready (ik_event_internal_t
*event
)
541 g_get_current_time (&tv
);
543 /* An event is ready if,
545 * it has no cookie -- there is nothing to be gained by holding it
546 * or, it is already paired -- we don't need to hold it anymore
547 * or, we have held it long enough
550 event
->event
->cookie
== 0 ||
551 event
->pair
!= NULL
||
552 g_timeval_lt (&event
->hold_until
, &tv
) ||
553 g_timeval_eq (&event
->hold_until
, &tv
);
557 ik_pair_moves (gpointer data
,
560 ik_event_internal_t
*event
= (ik_event_internal_t
*)data
;
562 if (event
->seen
== TRUE
|| event
->sent
== TRUE
)
565 if (event
->event
->cookie
!= 0)
567 /* When we get a MOVED_FROM event we delay sending the event by
568 * MOVE_HOLD_UNTIL_TIME microseconds. We need to do this because a
569 * MOVED_TO pair _might_ be coming in the near future */
570 if (event
->event
->mask
& IN_MOVED_FROM
)
572 g_hash_table_insert (cookie_hash
, GINT_TO_POINTER (event
->event
->cookie
), event
);
573 /* because we don't deliver move events there is no point in waiting for the match right now. */
574 ik_event_add_microseconds (event
, MOVE_HOLD_UNTIL_TIME
);
576 else if (event
->event
->mask
& IN_MOVED_TO
)
578 /* We need to check if we are waiting for this MOVED_TO events cookie to pair it with
580 ik_event_internal_t
*match
= NULL
;
581 match
= g_hash_table_lookup (cookie_hash
, GINT_TO_POINTER (event
->event
->cookie
));
584 g_hash_table_remove (cookie_hash
, GINT_TO_POINTER (event
->event
->cookie
));
585 ik_pair_events (match
, event
);
593 ik_process_events (void)
595 g_queue_foreach (events_to_process
, ik_pair_moves
, NULL
);
597 while (!g_queue_is_empty (events_to_process
))
599 ik_event_internal_t
*event
= g_queue_peek_head (events_to_process
);
601 /* This must have been sent as part of a MOVED_TO/MOVED_FROM */
605 g_queue_pop_head (events_to_process
);
606 /* Free the internal event structure */
611 /* The event isn't ready yet */
612 if (!ik_event_ready (event
))
616 event
= g_queue_pop_head (events_to_process
);
618 /* Check if this is a MOVED_FROM that is also sitting in the cookie_hash */
619 if (event
->event
->cookie
&& event
->pair
== NULL
&&
620 g_hash_table_lookup (cookie_hash
, GINT_TO_POINTER (event
->event
->cookie
)))
621 g_hash_table_remove (cookie_hash
, GINT_TO_POINTER (event
->event
->cookie
));
625 /* We send out paired MOVED_FROM/MOVED_TO events in the same event buffer */
626 /* g_assert (event->event->mask == IN_MOVED_FROM && event->pair->event->mask == IN_MOVED_TO); */
627 /* Copy the paired data */
628 event
->pair
->sent
= TRUE
;
632 else if (event
->event
->cookie
)
634 /* If we couldn't pair a MOVED_FROM and MOVED_TO together, we change
636 /* Changeing MOVED_FROM to DELETE and MOVED_TO to create lets us make
637 * the gaurantee that you will never see a non-matched MOVE event */
638 event
->event
->original_mask
= event
->event
->mask
;
640 if (event
->event
->mask
& IN_MOVED_FROM
)
642 event
->event
->mask
= IN_DELETE
|(event
->event
->mask
& IN_ISDIR
);
643 ik_move_misses
++; /* not super accurate, if we aren't watching the destination it still counts as a miss */
645 if (event
->event
->mask
& IN_MOVED_TO
)
646 event
->event
->mask
= IN_CREATE
|(event
->event
->mask
& IN_ISDIR
);
649 /* Push the ik_event_t onto the event queue */
650 g_queue_push_tail (event_queue
, event
->event
);
651 /* Free the internal event structure */
657 ik_process_eq_callback (gpointer user_data
)
661 /* Try and move as many events to the event queue */
662 G_LOCK (inotify_lock
);
663 ik_process_events ();
665 while (!g_queue_is_empty (event_queue
))
667 ik_event_t
*event
= g_queue_pop_head (event_queue
);
674 if (g_queue_get_length (events_to_process
) == 0)
676 process_eq_running
= FALSE
;
680 G_UNLOCK (inotify_lock
);