/*	$OpenBSD: kqueue.c,v 1.5 2002/07/10 14:41:31 art Exp $	*/

/*
 * Copyright 2000-2002 Niels Provos <provos@citi.umich.edu>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#ifdef HAVE_CONFIG_H
#include "config.h"
#endif

#include <sys/types.h>
#ifdef HAVE_SYS_TIME_H
#include <sys/time.h>
#else
#include <sys/_libevent_time.h>
#endif
#include <sys/queue.h>
#include <sys/event.h>
#include <signal.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <errno.h>
#include <assert.h>
#ifdef HAVE_INTTYPES_H
#include <inttypes.h>
#endif

/* Some platforms apparently define the udata field of struct kevent as
 * intptr_t, whereas others define it as void*.  There doesn't seem to be an
 * easy way to tell them apart via autoconf, so we need to use OS macros. */
#if defined(HAVE_INTTYPES_H) && !defined(__OpenBSD__) && !defined(__FreeBSD__) && !defined(__darwin__) && !defined(__APPLE__)
#define PTR_TO_UDATA(x)	((intptr_t)(x))
#else
#define PTR_TO_UDATA(x)	(x)
#endif
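
/*
 * The macro is applied wherever a pointer is stored into kev.udata below:
 * kq_add() stores either the event itself (for read/write events) or the
 * per-signal event list (for signal events), and kq_dispatch() casts the
 * value back to the corresponding pointer type.
 */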
64 #include "event-internal.h"
67 #define EVLIST_X_KQINKERNEL 0x1000

struct kqop {
    struct kevent *changes;
    int nchanges;
    struct kevent *events;
    struct event_list evsigevents[NSIG];
    int nevents;
    int kq;
    pid_t pid;
};

static void *kq_init	(struct event_base *);
static int kq_add	(void *, struct event *);
static int kq_del	(void *, struct event *);
static int kq_dispatch	(struct event_base *, void *, struct timeval *);
static int kq_insert	(struct kqop *, struct kevent *);
static void kq_dealloc	(struct event_base *, void *);

const struct eventop kqops = {
    "kqueue",
    kq_init,
    kq_add,
    kq_del,
    kq_dispatch,
    kq_dealloc,
    1 /* need reinit */
};
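
/*
 * kqops is the backend dispatch table that the rest of libevent selects when
 * an event base is created; applications never call these functions
 * directly.  A minimal usage sketch (standard libevent 1.x API, shown only
 * for illustration; fd and cb are caller-supplied):
 *
 *	struct event ev;
 *	event_init();                                  picks kqueue if usable
 *	event_set(&ev, fd, EV_READ | EV_PERSIST, cb, NULL);
 *	event_add(&ev, NULL);                          ends up in kq_add()
 *	event_dispatch();                              drives kq_dispatch()
 */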

static void *
kq_init(struct event_base *base)
{
    int i, kq;
    struct kqop *kqueueop;

    /* Disable kqueue when this environment variable is set */
    if (evutil_getenv("EVENT_NOKQUEUE"))
        return (NULL);

    if (!(kqueueop = calloc(1, sizeof(struct kqop))))
        return (NULL);

    /* Initialize the kernel queue */
    if ((kq = kqueue()) == -1) {
        event_warn("kqueue");
        free(kqueueop);
        return (NULL);
    }

    kqueueop->kq = kq;
    kqueueop->pid = getpid();

    /* Initialize fields */
    kqueueop->changes = malloc(NEVENT * sizeof(struct kevent));
    if (kqueueop->changes == NULL) {
        free(kqueueop);
        return (NULL);
    }
    kqueueop->events = malloc(NEVENT * sizeof(struct kevent));
    if (kqueueop->events == NULL) {
        free(kqueueop->changes);
        free(kqueueop);
        return (NULL);
    }
    kqueueop->nevents = NEVENT;

    /* we need to keep track of multiple events per signal */
    for (i = 0; i < NSIG; ++i) {
        TAILQ_INIT(&kqueueop->evsigevents[i]);
    }

    /* Check for Mac OS X kqueue bug. */
    kqueueop->changes[0].ident = -1;
    kqueueop->changes[0].filter = EVFILT_READ;
    kqueueop->changes[0].flags = EV_ADD;
    /*
     * If kqueue works, then kevent will succeed, and it will
     * stick an error in events[0].  If kqueue is broken, then
     * kevent will fail.
     */
    if (kevent(kq,
        kqueueop->changes, 1, kqueueop->events, NEVENT, NULL) != 1 ||
        kqueueop->events[0].ident != -1 ||
        kqueueop->events[0].flags != EV_ERROR) {
        event_warn("%s: detected broken kqueue; not using.", __func__);
        free(kqueueop->changes);
        free(kqueueop->events);
        free(kqueueop);
        close(kq);
        return (NULL);
    }

    return (kqueueop);
}
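
/*
 * Note: the EVENT_NOKQUEUE check above means this backend can be disabled at
 * runtime by putting that variable into the environment before libevent is
 * initialized, e.g. (illustrative sketch):
 *
 *	setenv("EVENT_NOKQUEUE", "1", 1);
 *	event_init();
 */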

static int
kq_insert(struct kqop *kqop, struct kevent *kev)
{
    int nevents = kqop->nevents;

    if (kqop->nchanges == nevents) {
        struct kevent *newchange;
        struct kevent *newresult;

        nevents *= 2;

        newchange = realloc(kqop->changes,
            nevents * sizeof(struct kevent));
        if (newchange == NULL) {
            event_warn("%s: malloc", __func__);
            return (-1);
        }
        kqop->changes = newchange;

        newresult = realloc(kqop->events,
            nevents * sizeof(struct kevent));

        /*
         * If we fail, we don't have to worry about freeing,
         * the next realloc will pick it up.
         */
        if (newresult == NULL) {
            event_warn("%s: malloc", __func__);
            return (-1);
        }
        kqop->events = newresult;

        kqop->nevents = nevents;
    }

    memcpy(&kqop->changes[kqop->nchanges++], kev, sizeof(struct kevent));

    event_debug(("%s: fd %d %s%s",
        __func__, (int)kev->ident,
        kev->filter == EVFILT_READ ? "EVFILT_READ" : "EVFILT_WRITE",
        kev->flags == EV_DELETE ? " (del)" : ""));

    return (0);
}

static void
kq_sighandler(int sig)
{
    /* Do nothing here */
}
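
/*
 * The no-op handler is installed (via _evsignal_set_handler() in kq_add())
 * so that a monitored signal no longer triggers its default action; the
 * signal itself is reported through the EVFILT_SIGNAL filter instead.
 */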

static int
kq_dispatch(struct event_base *base, void *arg, struct timeval *tv)
{
    struct kqop *kqop = arg;
    struct kevent *changes = kqop->changes;
    struct kevent *events = kqop->events;
    struct event *ev;
    struct timespec ts, *ts_p = NULL;
    int i, res;

    if (tv != NULL) {
        TIMEVAL_TO_TIMESPEC(tv, &ts);
        ts_p = &ts;
    }

    res = kevent(kqop->kq, changes, kqop->nchanges,
        events, kqop->nevents, ts_p);
    kqop->nchanges = 0;
    if (res == -1) {
        if (errno != EINTR) {
            event_warn("kevent");
            return (-1);
        }

        return (0);
    }

    event_debug(("%s: kevent reports %d", __func__, res));

    for (i = 0; i < res; i++) {
        int which = 0;

        if (events[i].flags & EV_ERROR) {
            /*
             * Error messages that can happen when a delete fails.
             *   EBADF happens when the file descriptor has been
             *   closed,
             *   ENOENT when the file descriptor was closed and
             *   then reopened.
             *   EINVAL for some reasons not understood; EINVAL
             *   should not be returned ever; but FreeBSD does :-\
             * An error is also indicated when a callback deletes
             * an event we are still processing.  In that case
             * the data field is set to ENOENT.
             */
            if (events[i].data == EBADF ||
                events[i].data == EINVAL ||
                events[i].data == ENOENT)
                continue;
            errno = events[i].data;
            return (-1);
        }

        if (events[i].filter == EVFILT_READ) {
            which |= EV_READ;
        } else if (events[i].filter == EVFILT_WRITE) {
            which |= EV_WRITE;
        } else if (events[i].filter == EVFILT_SIGNAL) {
            which |= EV_SIGNAL;
        }

        if (!which)
            continue;

        if (events[i].filter == EVFILT_SIGNAL) {
            struct event_list *head =
                (struct event_list *)events[i].udata;
            TAILQ_FOREACH(ev, head, ev_signal_next) {
                event_active(ev, which, events[i].data);
            }
        } else {
            ev = (struct event *)events[i].udata;

            if (!(ev->ev_events & EV_PERSIST))
                ev->ev_flags &= ~EVLIST_X_KQINKERNEL;

            event_active(ev, which, 1);
        }
    }

    return (0);
}
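
/*
 * Activation flow (illustrative): for a descriptor event the udata field
 * carries the struct event itself, so a callback registered as
 *
 *	event_set(&ev, fd, EV_READ, cb, arg);
 *
 * is eventually run as cb(fd, EV_READ, arg) once event_active() has queued
 * it.  For signals, udata points at the per-signal event list and every
 * event on that list is activated, with the kernel's delivery count passed
 * along from events[i].data.
 */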

static int
kq_add(void *arg, struct event *ev)
{
    struct kqop *kqop = arg;
    struct kevent kev;

    if (ev->ev_events & EV_SIGNAL) {
        int nsignal = EVENT_SIGNAL(ev);

        assert(nsignal >= 0 && nsignal < NSIG);
        if (TAILQ_EMPTY(&kqop->evsigevents[nsignal])) {
            struct timespec timeout = { 0, 0 };

            memset(&kev, 0, sizeof(kev));
            kev.ident = nsignal;
            kev.filter = EVFILT_SIGNAL;
            kev.flags = EV_ADD;
            kev.udata = PTR_TO_UDATA(&kqop->evsigevents[nsignal]);

            /* Be ready for the signal if it is sent any
             * time between now and the next call to
             * kq_dispatch. */
            if (kevent(kqop->kq, &kev, 1, NULL, 0, &timeout) == -1)
                return (-1);

            if (_evsignal_set_handler(ev->ev_base, nsignal,
                kq_sighandler) == -1)
                return (-1);
        }

        TAILQ_INSERT_TAIL(&kqop->evsigevents[nsignal], ev,
            ev_signal_next);
        ev->ev_flags |= EVLIST_X_KQINKERNEL;
        return (0);
    }

    if (ev->ev_events & EV_READ) {
        memset(&kev, 0, sizeof(kev));
        kev.ident = ev->ev_fd;
        kev.filter = EVFILT_READ;
#ifdef NOTE_EOF
        /* Make it behave like select() and poll() */
        kev.fflags = NOTE_EOF;
#endif
        kev.flags = EV_ADD;
        if (!(ev->ev_events & EV_PERSIST))
            kev.flags |= EV_ONESHOT;
        kev.udata = PTR_TO_UDATA(ev);

        if (kq_insert(kqop, &kev) == -1)
            return (-1);

        ev->ev_flags |= EVLIST_X_KQINKERNEL;
    }

    if (ev->ev_events & EV_WRITE) {
        memset(&kev, 0, sizeof(kev));
        kev.ident = ev->ev_fd;
        kev.filter = EVFILT_WRITE;
        kev.flags = EV_ADD;
        if (!(ev->ev_events & EV_PERSIST))
            kev.flags |= EV_ONESHOT;
        kev.udata = PTR_TO_UDATA(ev);

        if (kq_insert(kqop, &kev) == -1)
            return (-1);

        ev->ev_flags |= EVLIST_X_KQINKERNEL;
    }

    return (0);
}
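
/*
 * Non-persistent events are registered with EV_ONESHOT, so the kernel drops
 * them after the first trigger; kq_dispatch() mirrors that by clearing
 * EVLIST_X_KQINKERNEL, which in turn lets kq_del() skip a redundant delete.
 */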

static int
kq_del(void *arg, struct event *ev)
{
    struct kqop *kqop = arg;
    struct kevent kev;

    if (!(ev->ev_flags & EVLIST_X_KQINKERNEL))
        return (0);

    if (ev->ev_events & EV_SIGNAL) {
        int nsignal = EVENT_SIGNAL(ev);
        struct timespec timeout = { 0, 0 };

        assert(nsignal >= 0 && nsignal < NSIG);
        TAILQ_REMOVE(&kqop->evsigevents[nsignal], ev, ev_signal_next);
        if (TAILQ_EMPTY(&kqop->evsigevents[nsignal])) {
            memset(&kev, 0, sizeof(kev));
            kev.ident = nsignal;
            kev.filter = EVFILT_SIGNAL;
            kev.flags = EV_DELETE;

            /* Because we insert signal events
             * immediately, we need to delete them
             * immediately, too */
            if (kevent(kqop->kq, &kev, 1, NULL, 0, &timeout) == -1)
                return (-1);

            if (_evsignal_restore_handler(ev->ev_base,
                nsignal) == -1)
                return (-1);
        }

        ev->ev_flags &= ~EVLIST_X_KQINKERNEL;
        return (0);
    }

    if (ev->ev_events & EV_READ) {
        memset(&kev, 0, sizeof(kev));
        kev.ident = ev->ev_fd;
        kev.filter = EVFILT_READ;
        kev.flags = EV_DELETE;

        if (kq_insert(kqop, &kev) == -1)
            return (-1);

        ev->ev_flags &= ~EVLIST_X_KQINKERNEL;
    }

    if (ev->ev_events & EV_WRITE) {
        memset(&kev, 0, sizeof(kev));
        kev.ident = ev->ev_fd;
        kev.filter = EVFILT_WRITE;
        kev.flags = EV_DELETE;

        if (kq_insert(kqop, &kev) == -1)
            return (-1);

        ev->ev_flags &= ~EVLIST_X_KQINKERNEL;
    }

    return (0);
}
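
/*
 * Note the asymmetry: descriptor deletes are merely queued through
 * kq_insert() and reach the kernel on the next kevent() call in
 * kq_dispatch(), while signal deletes (like signal adds) are pushed to the
 * kernel immediately with their own kevent() call.
 */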

static void
kq_dealloc(struct event_base *base, void *arg)
{
    struct kqop *kqop = arg;

    if (kqop->changes)
        free(kqop->changes);
    if (kqop->events)
        free(kqop->events);
    if (kqop->kq >= 0 && kqop->pid == getpid())
        close(kqop->kq);
    memset(kqop, 0, sizeof(struct kqop));
    free(kqop);
}
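
/*
 * The pid check above is a fork-safety measure: kqueue descriptors are not
 * inherited across fork(), so only the process that actually created the
 * queue closes it here.
 */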