/*
 * Copyright (c) 2000-2004 Niels Provos <provos@citi.umich.edu>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#ifdef HAVE_CONFIG_H
#include "config.h"
#endif

#ifdef WIN32
#define WIN32_LEAN_AND_MEAN
#include <windows.h>
#undef WIN32_LEAN_AND_MEAN
#endif
#include <sys/types.h>
#ifdef HAVE_SYS_TIME_H
#include <sys/time.h>
#else
#include <sys/_time.h>
#endif
#include <sys/queue.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <assert.h>
#include <errno.h>
#include <signal.h>
#include <time.h>

#include "event.h"
#include "event-internal.h"
#include "evutil.h"
#include "log.h"
#ifdef HAVE_EVENT_PORTS
extern const struct eventop evportops;
#endif
#ifdef HAVE_SELECT
extern const struct eventop selectops;
#endif
#ifdef HAVE_POLL
extern const struct eventop pollops;
#endif
#ifdef HAVE_EPOLL
extern const struct eventop epollops;
#endif
#ifdef HAVE_WORKING_KQUEUE
extern const struct eventop kqops;
#endif
#ifdef HAVE_DEVPOLL
extern const struct eventop devpollops;
#endif
#ifdef WIN32
extern const struct eventop win32ops;
#endif
/* In order of preference */
static const struct eventop *eventops[] = {
#ifdef HAVE_EVENT_PORTS
    &evportops,
#endif
#ifdef HAVE_WORKING_KQUEUE
    &kqops,
#endif
#ifdef HAVE_EPOLL
    &epollops,
#endif
#ifdef HAVE_DEVPOLL
    &devpollops,
#endif
#ifdef HAVE_POLL
    &pollops,
#endif
#ifdef HAVE_SELECT
    &selectops,
#endif
#ifdef WIN32
    &win32ops,
#endif
    NULL
};
struct event_base *current_base = NULL;
extern struct event_base *evsignal_base;
static int use_monotonic;

/* Handle signals - This is a deprecated interface */
int (*event_sigcb)(void);           /* Signal callback when gotsig is set */
volatile sig_atomic_t event_gotsig; /* Set in signal handler */
static void event_queue_insert(struct event_base *, struct event *, int);
static void event_queue_remove(struct event_base *, struct event *, int);
static int  event_haveevents(struct event_base *);

static void event_process_active(struct event_base *);

static int  timeout_next(struct event_base *, struct timeval **);
static void timeout_process(struct event_base *);
static void timeout_correct(struct event_base *, struct timeval *);
static void
detect_monotonic(void)
{
#if defined(HAVE_CLOCK_GETTIME) && defined(CLOCK_MONOTONIC)
    struct timespec ts;

    /* Prefer the monotonic clock over gettimeofday() if it is usable. */
    if (clock_gettime(CLOCK_MONOTONIC, &ts) == 0)
        use_monotonic = 1;
#endif
}
static int
gettime(struct event_base *base, struct timeval *tp)
{
    /* Serve the cached time if the event loop has primed it. */
    if (base->tv_cache.tv_sec) {
        *tp = base->tv_cache;
        return (0);
    }

#if defined(HAVE_CLOCK_GETTIME) && defined(CLOCK_MONOTONIC)
    if (use_monotonic) {
        struct timespec ts;

        if (clock_gettime(CLOCK_MONOTONIC, &ts) == -1)
            return (-1);

        tp->tv_sec = ts.tv_sec;
        tp->tv_usec = ts.tv_nsec / 1000;
        return (0);
    }
#endif

    return (evutil_gettimeofday(tp, NULL));
}
struct event_base *
event_init(void)
{
    struct event_base *base = event_base_new();

    if (base != NULL)
        current_base = base;

    return (base);
}
struct event_base *
event_base_new(void)
{
    int i;
    struct event_base *base;

    if ((base = calloc(1, sizeof(struct event_base))) == NULL)
        event_err(1, "%s: calloc", __func__);

    event_sigcb = NULL;
    event_gotsig = 0;

    detect_monotonic();
    gettime(base, &base->event_tv);

    min_heap_ctor(&base->timeheap);
    TAILQ_INIT(&base->eventqueue);
    base->sig.ev_signal_pair[0] = -1;
    base->sig.ev_signal_pair[1] = -1;

    /* Try the backends in order of preference until one initializes. */
    base->evbase = NULL;
    for (i = 0; eventops[i] && !base->evbase; i++) {
        base->evsel = eventops[i];

        base->evbase = base->evsel->init(base);
    }

    if (base->evbase == NULL)
        event_errx(1, "%s: no event mechanism available", __func__);

    if (getenv("EVENT_SHOW_METHOD"))
        event_msgx("libevent using: %s\n",
            base->evsel->name);

    /* allocate a single active event queue */
    event_base_priority_init(base, 1);

    return (base);
}
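/*
 * Illustrative usage sketch (not part of libevent itself): a typical
 * caller creates a base, binds events to it, and dispatches.  The
 * names fd and read_cb are hypothetical.
 *
 *    struct event_base *base = event_base_new();
 *    struct event ev;
 *
 *    event_set(&ev, fd, EV_READ|EV_PERSIST, read_cb, NULL);
 *    event_base_set(base, &ev);
 *    event_add(&ev, NULL);
 *    event_base_dispatch(base);
 *    event_base_free(base);
 */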
void
event_base_free(struct event_base *base)
{
    int i, n_deleted = 0;
    struct event *ev;

    if (base == NULL && current_base)
        base = current_base;
    if (base == current_base)
        current_base = NULL;

    /* XXX(niels) - check for internal events first */
    assert(base);
    /* Delete all non-internal events. */
    for (ev = TAILQ_FIRST(&base->eventqueue); ev; ) {
        struct event *next = TAILQ_NEXT(ev, ev_next);
        if (!(ev->ev_flags & EVLIST_INTERNAL)) {
            event_del(ev);
            ++n_deleted;
        }
        ev = next;
    }
    while ((ev = min_heap_top(&base->timeheap)) != NULL) {
        event_del(ev);
        ++n_deleted;
    }

    for (i = 0; i < base->nactivequeues; ++i) {
        for (ev = TAILQ_FIRST(base->activequeues[i]); ev; ) {
            struct event *next = TAILQ_NEXT(ev, ev_active_next);
            if (!(ev->ev_flags & EVLIST_INTERNAL)) {
                event_del(ev);
                ++n_deleted;
            }
            ev = next;
        }
    }

    if (n_deleted)
        event_debug(("%s: %d events were still set in base",
            __func__, n_deleted));

    if (base->evsel->dealloc != NULL)
        base->evsel->dealloc(base, base->evbase);

    for (i = 0; i < base->nactivequeues; ++i)
        assert(TAILQ_EMPTY(base->activequeues[i]));

    assert(min_heap_empty(&base->timeheap));
    min_heap_dtor(&base->timeheap);

    for (i = 0; i < base->nactivequeues; ++i)
        free(base->activequeues[i]);
    free(base->activequeues);

    assert(TAILQ_EMPTY(&base->eventqueue));

    free(base);
}
/* reinitialize the event base after a fork */
int
event_reinit(struct event_base *base)
{
    const struct eventop *evsel = base->evsel;
    void *evbase = base->evbase;
    int res = 0;
    struct event *ev;

    /* check if this event mechanism requires reinit */
    if (!evsel->need_reinit)
        return (0);

    /* prevent internal delete */
    if (base->sig.ev_signal_added) {
        /* we cannot call event_del here because the base has
         * not been reinitialized yet. */
        event_queue_remove(base, &base->sig.ev_signal,
            EVLIST_INSERTED);
        if (base->sig.ev_signal.ev_flags & EVLIST_ACTIVE)
            event_queue_remove(base, &base->sig.ev_signal,
                EVLIST_ACTIVE);
        base->sig.ev_signal_added = 0;
    }

    if (base->evsel->dealloc != NULL)
        base->evsel->dealloc(base, base->evbase);
    evbase = base->evbase = evsel->init(base);
    if (base->evbase == NULL)
        event_errx(1, "%s: could not reinitialize event mechanism",
            __func__);

    /* re-register every event with the fresh backend */
    TAILQ_FOREACH(ev, &base->eventqueue, ev_next) {
        if (evsel->add(evbase, ev) == -1)
            res = -1;
    }

    return (res);
}
int
event_priority_init(int npriorities)
{
    return event_base_priority_init(current_base, npriorities);
}
int
event_base_priority_init(struct event_base *base, int npriorities)
{
    int i;

    if (base->event_count_active)
        return (-1);

    if (base->nactivequeues && npriorities != base->nactivequeues) {
        for (i = 0; i < base->nactivequeues; ++i) {
            free(base->activequeues[i]);
        }
        free(base->activequeues);
    }

    /* Allocate our priority queues: one list pointer per priority */
    base->nactivequeues = npriorities;
    base->activequeues = (struct event_list **)calloc(base->nactivequeues,
        sizeof(struct event_list *));
    if (base->activequeues == NULL)
        event_err(1, "%s: calloc", __func__);

    for (i = 0; i < base->nactivequeues; ++i) {
        base->activequeues[i] = malloc(sizeof(struct event_list));
        if (base->activequeues[i] == NULL)
            event_err(1, "%s: malloc", __func__);
        TAILQ_INIT(base->activequeues[i]);
    }

    return (0);
}

static int
event_haveevents(struct event_base *base)
{
    return (base->event_count > 0);
}
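/*
 * Illustrative sketch (not part of libevent): with three priorities,
 * queue 0 is drained before queues 1 and 2 get a turn.  The names fd
 * and urgent_cb are hypothetical.
 *
 *    event_base_priority_init(base, 3);
 *    event_set(&ev, fd, EV_READ, urgent_cb, NULL);
 *    event_base_set(base, &ev);
 *    event_priority_set(&ev, 0);    0 = most urgent queue
 *    event_add(&ev, NULL);
 */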
/*
 * Active events are stored in priority queues.  Lower priorities are always
 * processed before higher priorities.  Low priority events can starve high
 * priority ones.
 */

static void
event_process_active(struct event_base *base)
{
    struct event *ev;
    struct event_list *activeq = NULL;
    int i;
    short ncalls;

    /* Find the first (most urgent) non-empty queue. */
    for (i = 0; i < base->nactivequeues; ++i) {
        if (TAILQ_FIRST(base->activequeues[i]) != NULL) {
            activeq = base->activequeues[i];
            break;
        }
    }

    assert(activeq != NULL);

    for (ev = TAILQ_FIRST(activeq); ev; ev = TAILQ_FIRST(activeq)) {
        if (ev->ev_events & EV_PERSIST)
            event_queue_remove(base, ev, EVLIST_ACTIVE);
        else
            event_del(ev);

        /* Allows deletes to work */
        ncalls = ev->ev_ncalls;
        ev->ev_pncalls = &ncalls;
        while (ncalls) {
            ncalls--;
            ev->ev_ncalls = ncalls;
            (*ev->ev_callback)((int)ev->ev_fd, ev->ev_res, ev->ev_arg);
            if (event_gotsig || base->event_break)
                return;
        }
    }
}
/*
 * Wait continuously for events.  We exit only if no events are left.
 */

int
event_dispatch(void)
{
    return (event_loop(0));
}
int
event_base_dispatch(struct event_base *event_base)
{
    return (event_base_loop(event_base, 0));
}

const char *
event_base_get_method(struct event_base *base)
{
    assert(base);
    return (base->evsel->name);
}
static void
event_loopexit_cb(int fd, short what, void *arg)
{
    struct event_base *base = arg;
    base->event_gotterm = 1;
}

/* not thread safe */
int
event_loopexit(const struct timeval *tv)
{
    return (event_once(-1, EV_TIMEOUT, event_loopexit_cb,
        current_base, tv));
}
int
event_base_loopexit(struct event_base *event_base, const struct timeval *tv)
{
    return (event_base_once(event_base, -1, EV_TIMEOUT, event_loopexit_cb,
        event_base, tv));
}
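/*
 * Illustrative sketch (not part of libevent): request that the loop
 * terminate after ten seconds by scheduling the loopexit timer, as
 * opposed to event_base_loopbreak() below, which stops the loop right
 * after the currently running callback.
 *
 *    struct timeval tv = { 10, 0 };
 *    event_base_loopexit(base, &tv);
 */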
/* not thread safe */
int
event_loopbreak(void)
{
    return (event_base_loopbreak(current_base));
}

int
event_base_loopbreak(struct event_base *event_base)
{
    if (event_base == NULL)
        return (-1);

    event_base->event_break = 1;
    return (0);
}

/* not thread safe */
int
event_loop(int flags)
{
    return event_base_loop(current_base, flags);
}
int
event_base_loop(struct event_base *base, int flags)
{
    const struct eventop *evsel = base->evsel;
    void *evbase = base->evbase;
    struct timeval tv;
    struct timeval *tv_p;
    int res, done;

    /* clear time cache */
    base->tv_cache.tv_sec = 0;

    if (base->sig.ev_signal_added)
        evsignal_base = base;
    done = 0;
    while (!done) {
        /* Terminate the loop if we have been asked to */
        if (base->event_gotterm) {
            base->event_gotterm = 0;
            break;
        }

        if (base->event_break) {
            base->event_break = 0;
            break;
        }

        /* You cannot use this interface for multi-threaded apps */
        while (event_gotsig) {
            event_gotsig = 0;
            if (event_sigcb) {
                res = (*event_sigcb)();
                if (res == -1) {
                    errno = EINTR;
                    return (-1);
                }
            }
        }

        timeout_correct(base, &tv);

        tv_p = &tv;
        if (!base->event_count_active && !(flags & EVLOOP_NONBLOCK)) {
            timeout_next(base, &tv_p);
        } else {
            /*
             * if we have active events, we just poll new events
             * without waiting.
             */
            evutil_timerclear(&tv);
        }

        /* If we have no events, we just exit */
        if (!event_haveevents(base)) {
            event_debug(("%s: no events registered.", __func__));
            return (1);
        }

        /* update last old time */
        gettime(base, &base->event_tv);

        /* clear time cache */
        base->tv_cache.tv_sec = 0;

        res = evsel->dispatch(base, evbase, tv_p);

        if (res == -1)
            return (-1);
        gettime(base, &base->tv_cache);

        timeout_process(base);

        if (base->event_count_active) {
            event_process_active(base);
            if (!base->event_count_active && (flags & EVLOOP_ONCE))
                done = 1;
        } else if (flags & EVLOOP_NONBLOCK)
            done = 1;
    }

    /* clear time cache */
    base->tv_cache.tv_sec = 0;

    event_debug(("%s: asked to terminate loop.", __func__));
    return (0);
}
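/*
 * Illustrative sketch (not part of libevent): the dispatch flags.
 * EVLOOP_NONBLOCK polls the backend once without waiting;
 * EVLOOP_ONCE blocks until at least one event becomes active and its
 * callbacks have run, then returns.
 *
 *    event_base_loop(base, EVLOOP_NONBLOCK);
 *    event_base_loop(base, EVLOOP_ONCE);
 */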
/* Sets up an event for processing once */
struct event_once {
    struct event ev;

    void (*cb)(int, short, void *);
    void *arg;
};

/* One-time callback, it deletes itself */
static void
event_once_cb(int fd, short events, void *arg)
{
    struct event_once *eonce = arg;

    (*eonce->cb)(fd, events, eonce->arg);
    free(eonce);
}
/* not threadsafe, event scheduled once. */
int
event_once(int fd, short events,
    void (*callback)(int, short, void *), void *arg, const struct timeval *tv)
{
    return event_base_once(current_base, fd, events, callback, arg, tv);
}
/* Schedules an event once */
int
event_base_once(struct event_base *base, int fd, short events,
    void (*callback)(int, short, void *), void *arg, const struct timeval *tv)
{
    struct event_once *eonce;
    struct timeval etv;
    int res;

    /* We cannot support signals that just fire once */
    if (events & EV_SIGNAL)
        return (-1);

    if ((eonce = calloc(1, sizeof(struct event_once))) == NULL)
        return (-1);

    eonce->cb = callback;
    eonce->arg = arg;

    if (events == EV_TIMEOUT) {
        if (tv == NULL) {
            evutil_timerclear(&etv);
            tv = &etv;
        }

        evtimer_set(&eonce->ev, event_once_cb, eonce);
    } else if (events & (EV_READ|EV_WRITE)) {
        events &= EV_READ|EV_WRITE;

        event_set(&eonce->ev, fd, events, event_once_cb, eonce);
    } else {
        /* Bad event combination */
        free(eonce);
        return (-1);
    }

    res = event_base_set(base, &eonce->ev);
    if (res == 0)
        res = event_add(&eonce->ev, tv);
    if (res != 0) {
        free(eonce);
        return (res);
    }

    return (0);
}
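/*
 * Illustrative sketch (not part of libevent): schedule a one-shot
 * timer; the heap-allocated event_once wrapper frees itself after the
 * callback fires.  once_cb is hypothetical.
 *
 *    struct timeval tv = { 2, 0 };
 *    event_base_once(base, -1, EV_TIMEOUT, once_cb, NULL, &tv);
 */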
void
event_set(struct event *ev, int fd, short events,
    void (*callback)(int, short, void *), void *arg)
{
    /* Take the current base - caller needs to set the real base later */
    ev->ev_base = current_base;

    ev->ev_callback = callback;
    ev->ev_arg = arg;
    ev->ev_fd = fd;
    ev->ev_events = events;
    ev->ev_res = 0;
    ev->ev_flags = EVLIST_INIT;
    ev->ev_ncalls = 0;
    ev->ev_pncalls = NULL;

    min_heap_elem_init(ev);

    /* by default, we put new events into the middle priority */
    if (current_base)
        ev->ev_pri = current_base->nactivequeues/2;
}
int
event_base_set(struct event_base *base, struct event *ev)
{
    /* Only innocent events may be assigned to a different base */
    if (ev->ev_flags != EVLIST_INIT)
        return (-1);

    ev->ev_base = base;
    ev->ev_pri = base->nactivequeues/2;

    return (0);
}
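/*
 * Illustrative sketch (not part of libevent): event_set() binds the
 * event to current_base, so a program using a private base must rebind
 * it with event_base_set() before the first event_add().  The names
 * fd, write_cb and my_base are hypothetical.
 *
 *    struct event ev;
 *    event_set(&ev, fd, EV_WRITE, write_cb, NULL);
 *    event_base_set(my_base, &ev);
 *    event_add(&ev, NULL);
 */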
/*
 * Sets the priority of an event - changing the priority fails if the
 * event is already active.
 */
int
event_priority_set(struct event *ev, int pri)
{
    if (ev->ev_flags & EVLIST_ACTIVE)
        return (-1);
    if (pri < 0 || pri >= ev->ev_base->nactivequeues)
        return (-1);

    ev->ev_pri = pri;

    return (0);
}
/*
 * Checks if a specific event is pending or scheduled.
 */
int
event_pending(struct event *ev, short event, struct timeval *tv)
{
    struct timeval now, res;
    int flags = 0;

    if (ev->ev_flags & EVLIST_INSERTED)
        flags |= (ev->ev_events & (EV_READ|EV_WRITE|EV_SIGNAL));
    if (ev->ev_flags & EVLIST_ACTIVE)
        flags |= ev->ev_res;
    if (ev->ev_flags & EVLIST_TIMEOUT)
        flags |= EV_TIMEOUT;

    event &= (EV_TIMEOUT|EV_READ|EV_WRITE|EV_SIGNAL);

    /* See if there is a timeout that we should report */
    if (tv != NULL && (flags & event & EV_TIMEOUT)) {
        gettime(ev->ev_base, &now);
        evutil_timersub(&ev->ev_timeout, &now, &res);
        /* correctly remap to real time */
        evutil_gettimeofday(&now, NULL);
        evutil_timeradd(&now, &res, tv);
    }

    return (flags & event);
}
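/*
 * Illustrative sketch (not part of libevent): query whether ev still
 * has a timeout scheduled; on success tv holds the expiry remapped to
 * wall-clock time.
 *
 *    struct timeval expires;
 *    if (event_pending(&ev, EV_TIMEOUT, &expires))
 *        printf("expires at %ld\n", (long)expires.tv_sec);
 */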
int
event_add(struct event *ev, const struct timeval *tv)
{
    struct event_base *base = ev->ev_base;
    const struct eventop *evsel = base->evsel;
    void *evbase = base->evbase;
    int res = 0;

    event_debug((
        "event_add: event: %p, %s%s%scall %p",
        ev,
        ev->ev_events & EV_READ ? "EV_READ " : " ",
        ev->ev_events & EV_WRITE ? "EV_WRITE " : " ",
        tv ? "EV_TIMEOUT " : " ",
        ev->ev_callback));

    assert(!(ev->ev_flags & ~EVLIST_ALL));

    /*
     * prepare for timeout insertion further below, if we get a
     * failure on any step, we should not change any state.
     */
    if (tv != NULL && !(ev->ev_flags & EVLIST_TIMEOUT)) {
        if (min_heap_reserve(&base->timeheap,
            1 + min_heap_size(&base->timeheap)) == -1)
            return (-1);  /* ENOMEM == errno */
    }

    if ((ev->ev_events & (EV_READ|EV_WRITE|EV_SIGNAL)) &&
        !(ev->ev_flags & (EVLIST_INSERTED|EVLIST_ACTIVE))) {
        res = evsel->add(evbase, ev);
        if (res != -1)
            event_queue_insert(base, ev, EVLIST_INSERTED);
    }

    /*
     * we should change the timeout state only if the previous event
     * addition succeeded.
     */
    if (res != -1 && tv != NULL) {
        struct timeval now;

        /*
         * we already reserved memory above for the case where we
         * are not replacing an existing timeout.
         */
        if (ev->ev_flags & EVLIST_TIMEOUT)
            event_queue_remove(base, ev, EVLIST_TIMEOUT);

        /* Check if it is active due to a timeout.  Rescheduling
         * this timeout before the callback can be executed
         * removes it from the active list. */
        if ((ev->ev_flags & EVLIST_ACTIVE) &&
            (ev->ev_res & EV_TIMEOUT)) {
            /* See if we are just active executing this
             * event in a loop
             */
            if (ev->ev_ncalls && ev->ev_pncalls) {
                /* Abort loop */
                *ev->ev_pncalls = 0;
            }

            event_queue_remove(base, ev, EVLIST_ACTIVE);
        }

        gettime(base, &now);
        evutil_timeradd(&now, tv, &ev->ev_timeout);

        event_debug((
            "event_add: timeout in %ld seconds, call %p",
            tv->tv_sec, ev->ev_callback));

        event_queue_insert(base, ev, EVLIST_TIMEOUT);
    }

    return (res);
}
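/*
 * Illustrative sketch (not part of libevent): a second event_add()
 * with a timeout replaces the pending timeout, so a per-connection
 * idle timer can be refreshed from a read callback.  ev_read is
 * hypothetical.
 *
 *    struct timeval idle = { 30, 0 };
 *    event_add(&ev_read, &idle);
 */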
int
event_del(struct event *ev)
{
    struct event_base *base;
    const struct eventop *evsel;
    void *evbase;

    event_debug(("event_del: %p, callback %p",
        ev, ev->ev_callback));

    /* An event without a base has not been added */
    if (ev->ev_base == NULL)
        return (-1);

    base = ev->ev_base;
    evsel = base->evsel;
    evbase = base->evbase;

    assert(!(ev->ev_flags & ~EVLIST_ALL));

    /* See if we are just active executing this event in a loop */
    if (ev->ev_ncalls && ev->ev_pncalls) {
        /* Abort loop */
        *ev->ev_pncalls = 0;
    }

    if (ev->ev_flags & EVLIST_TIMEOUT)
        event_queue_remove(base, ev, EVLIST_TIMEOUT);

    if (ev->ev_flags & EVLIST_ACTIVE)
        event_queue_remove(base, ev, EVLIST_ACTIVE);

    if (ev->ev_flags & EVLIST_INSERTED) {
        event_queue_remove(base, ev, EVLIST_INSERTED);
        return (evsel->del(evbase, ev));
    }

    return (0);
}
void
event_active(struct event *ev, int res, short ncalls)
{
    /* We get different kinds of events, add them together */
    if (ev->ev_flags & EVLIST_ACTIVE) {
        ev->ev_res |= res;
        return;
    }

    ev->ev_res = res;
    ev->ev_ncalls = ncalls;
    ev->ev_pncalls = NULL;
    event_queue_insert(ev->ev_base, ev, EVLIST_ACTIVE);
}
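/*
 * Illustrative sketch (not part of libevent): activate an event by
 * hand; its callback then runs from event_process_active() on the
 * next pass of the loop, exactly as if the backend had reported
 * EV_READ.
 *
 *    event_active(&ev, EV_READ, 1);
 */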
static int
timeout_next(struct event_base *base, struct timeval **tv_p)
{
    struct timeval now;
    struct event *ev;
    struct timeval *tv = *tv_p;

    if ((ev = min_heap_top(&base->timeheap)) == NULL) {
        /* if no time-based events are active wait for I/O */
        *tv_p = NULL;
        return (0);
    }

    if (gettime(base, &now) == -1)
        return (-1);

    if (evutil_timercmp(&ev->ev_timeout, &now, <=)) {
        evutil_timerclear(tv);
        return (0);
    }

    evutil_timersub(&ev->ev_timeout, &now, tv);

    assert(tv->tv_sec >= 0);
    assert(tv->tv_usec >= 0);

    event_debug(("timeout_next: in %ld seconds", tv->tv_sec));
    return (0);
}
/*
 * Determines if the time is running backwards by comparing the current
 * time against the last time we checked.  Not needed when using clock
 * monotonic.
 */

static void
timeout_correct(struct event_base *base, struct timeval *tv)
{
    struct event **pev;
    unsigned int size;
    struct timeval off;

    if (use_monotonic)
        return;

    /* Check if time is running backwards */
    gettime(base, tv);
    if (evutil_timercmp(tv, &base->event_tv, >=)) {
        base->event_tv = *tv;
        return;
    }

    event_debug(("%s: time is running backwards, corrected",
        __func__));
    evutil_timersub(&base->event_tv, tv, &off);

    /*
     * We can modify the key element of the node without destroying
     * the key, because we apply it to all in the right order.
     */
    pev = base->timeheap.p;
    size = base->timeheap.n;
    for (; size-- > 0; ++pev) {
        struct timeval *ev_tv = &(**pev).ev_timeout;
        evutil_timersub(ev_tv, &off, ev_tv);
    }
    /* Now remember what the new time turned out to be. */
    base->event_tv = *tv;
}
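/*
 * Worked example (illustrative): if the last observed time was 1000s
 * and gettimeofday() now reports 995s, then off = 5s and every pending
 * ev_timeout in the heap is pulled back by 5s, preserving both the
 * relative order and the remaining intervals of all timeouts.
 */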
static void
timeout_process(struct event_base *base)
{
    struct timeval now;
    struct event *ev;

    if (min_heap_empty(&base->timeheap))
        return;

    gettime(base, &now);

    while ((ev = min_heap_top(&base->timeheap))) {
        if (evutil_timercmp(&ev->ev_timeout, &now, >))
            break;

        /* delete this event from the I/O queues */
        event_del(ev);

        event_debug(("timeout_process: call %p",
            ev->ev_callback));
        event_active(ev, EV_TIMEOUT, 1);
    }
}
static void
event_queue_remove(struct event_base *base, struct event *ev, int queue)
{
    if (!(ev->ev_flags & queue))
        event_errx(1, "%s: %p(fd %d) not on queue %x", __func__,
            ev, ev->ev_fd, queue);

    /* Internal events do not count towards the user-visible total. */
    if (~ev->ev_flags & EVLIST_INTERNAL)
        base->event_count--;

    ev->ev_flags &= ~queue;
    switch (queue) {
    case EVLIST_INSERTED:
        TAILQ_REMOVE(&base->eventqueue, ev, ev_next);
        break;
    case EVLIST_ACTIVE:
        base->event_count_active--;
        TAILQ_REMOVE(base->activequeues[ev->ev_pri],
            ev, ev_active_next);
        break;
    case EVLIST_TIMEOUT:
        min_heap_erase(&base->timeheap, ev);
        break;
    default:
        event_errx(1, "%s: unknown queue %x", __func__, queue);
    }
}
static void
event_queue_insert(struct event_base *base, struct event *ev, int queue)
{
    if (ev->ev_flags & queue) {
        /* Double insertion is possible for active events */
        if (queue & EVLIST_ACTIVE)
            return;

        event_errx(1, "%s: %p(fd %d) already on queue %x", __func__,
            ev, ev->ev_fd, queue);
    }

    /* Internal events do not count towards the user-visible total. */
    if (~ev->ev_flags & EVLIST_INTERNAL)
        base->event_count++;

    ev->ev_flags |= queue;
    switch (queue) {
    case EVLIST_INSERTED:
        TAILQ_INSERT_TAIL(&base->eventqueue, ev, ev_next);
        break;
    case EVLIST_ACTIVE:
        base->event_count_active++;
        TAILQ_INSERT_TAIL(base->activequeues[ev->ev_pri],
            ev, ev_active_next);
        break;
    case EVLIST_TIMEOUT: {
        min_heap_push(&base->timeheap, ev);
        break;
    }
    default:
        event_errx(1, "%s: unknown queue %x", __func__, queue);
    }
}
/* Functions for debugging */

const char *
event_get_version(void)
{
    return (VERSION);
}

/*
 * No thread-safe interface needed - the information should be the same
 * for all threads.
 */
const char *
event_get_method(void)
{
    return (current_base->evsel->name);
}