/*
 * Copyright (c) 2000-2004 Niels Provos <provos@citi.umich.edu>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#ifdef WIN32
#define WIN32_LEAN_AND_MEAN
#include <windows.h>
#undef WIN32_LEAN_AND_MEAN
#endif

#include <sys/types.h>
#ifdef HAVE_SYS_TIME_H
#include <sys/time.h>
#else
#include <sys/_libevent_time.h>
#endif
#include <sys/queue.h>

#include "event-internal.h"
#ifdef HAVE_EVENT_PORTS
extern const struct eventop evportops;
#endif
extern const struct eventop selectops;
extern const struct eventop pollops;
extern const struct eventop epollops;
#ifdef HAVE_WORKING_KQUEUE
extern const struct eventop kqops;
#endif
extern const struct eventop devpollops;
extern const struct eventop win32ops;
/* In order of preference */
static const struct eventop *eventops[] = {
#ifdef HAVE_EVENT_PORTS
#ifdef HAVE_WORKING_KQUEUE
struct event_base *current_base = NULL;
extern struct event_base *evsignal_base;
static int use_monotonic = 1;

static void event_queue_insert(struct event_base *, struct event *, int);
static void event_queue_remove(struct event_base *, struct event *, int);
static int event_haveevents(struct event_base *);

static void event_process_active(struct event_base *);

static int timeout_next(struct event_base *, struct timeval **);
static void timeout_process(struct event_base *);
static void timeout_correct(struct event_base *, struct timeval *);
gettime(struct event_base *base, struct timeval *tp)
	if (base->tv_cache.tv_sec) {
		*tp = base->tv_cache;

#if defined(HAVE_CLOCK_GETTIME) && defined(CLOCK_MONOTONIC)
	    clock_gettime(CLOCK_MONOTONIC, &ts) == 0) {
		tp->tv_sec = ts.tv_sec;
		tp->tv_usec = ts.tv_nsec / 1000;

	return (evutil_gettimeofday(tp, NULL));
	struct event_base *base = event_base_new();

	struct event_base *base;

	if ((base = calloc(1, sizeof(struct event_base))) == NULL)
		event_err(1, "%s: calloc", __func__);

	gettime(base, &base->event_tv);

	min_heap_ctor(&base->timeheap);
	TAILQ_INIT(&base->eventqueue);
	base->sig.ev_signal_pair[0] = -1;
	base->sig.ev_signal_pair[1] = -1;

	for (i = 0; eventops[i] && !base->evbase; i++) {
		base->evsel = eventops[i];
		base->evbase = base->evsel->init(base);

	if (base->evbase == NULL)
		event_errx(1, "%s: no event mechanism available", __func__);

	if (evutil_getenv("EVENT_SHOW_METHOD"))
		event_msgx("libevent using: %s\n",

	/* allocate a single active event queue */
	event_base_priority_init(base, 1);
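/*
 * Usage sketch: once event_base_new() has picked a backend from eventops[]
 * above, the chosen method can be inspected through the public accessors
 * defined near the end of this file (printf assumed available via <stdio.h>):
 *
 *	struct event_base *base = event_base_new();
 *	printf("libevent backend: %s\n", event_base_get_method(base));
 */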
event_base_free(struct event_base *base)
	if (base == NULL && current_base)
	if (base == current_base)

	/* XXX(niels) - check for internal events first */

	/* Delete all non-internal events. */
	for (ev = TAILQ_FIRST(&base->eventqueue); ev; ) {
		struct event *next = TAILQ_NEXT(ev, ev_next);
		if (!(ev->ev_flags & EVLIST_INTERNAL)) {

	while ((ev = min_heap_top(&base->timeheap)) != NULL) {

	for (i = 0; i < base->nactivequeues; ++i) {
		for (ev = TAILQ_FIRST(base->activequeues[i]); ev; ) {
			struct event *next = TAILQ_NEXT(ev, ev_active_next);
			if (!(ev->ev_flags & EVLIST_INTERNAL)) {

	event_debug(("%s: %d events were still set in base",
	    __func__, n_deleted));

	if (base->evsel->dealloc != NULL)
		base->evsel->dealloc(base, base->evbase);

	for (i = 0; i < base->nactivequeues; ++i)
		assert(TAILQ_EMPTY(base->activequeues[i]));

	assert(min_heap_empty(&base->timeheap));
	min_heap_dtor(&base->timeheap);

	for (i = 0; i < base->nactivequeues; ++i)
		free(base->activequeues[i]);
	free(base->activequeues);

	assert(TAILQ_EMPTY(&base->eventqueue));
/* reinitialize the event base after a fork */
event_reinit(struct event_base *base)
	const struct eventop *evsel = base->evsel;
	void *evbase = base->evbase;

	/* check if this event mechanism requires reinit */
	if (!evsel->need_reinit)

	/* prevent internal delete */
	if (base->sig.ev_signal_added) {
		/* we cannot call event_del here because the base has
		 * not been reinitialized yet. */
		event_queue_remove(base, &base->sig.ev_signal,
		if (base->sig.ev_signal.ev_flags & EVLIST_ACTIVE)
			event_queue_remove(base, &base->sig.ev_signal,
		base->sig.ev_signal_added = 0;

	if (base->evsel->dealloc != NULL)
		base->evsel->dealloc(base, base->evbase);
	evbase = base->evbase = evsel->init(base);
	if (base->evbase == NULL)
		event_errx(1, "%s: could not reinitialize event mechanism",

	TAILQ_FOREACH(ev, &base->eventqueue, ev_next) {
		if (evsel->add(evbase, ev) == -1)
event_priority_init(int npriorities)
	return event_base_priority_init(current_base, npriorities);

event_base_priority_init(struct event_base *base, int npriorities)
	if (base->event_count_active)

	if (base->nactivequeues && npriorities != base->nactivequeues) {
		for (i = 0; i < base->nactivequeues; ++i) {
			free(base->activequeues[i]);
		free(base->activequeues);

	/* Allocate our priority queues */
	base->nactivequeues = npriorities;
	base->activequeues = (struct event_list **)
	    calloc(base->nactivequeues, sizeof(struct event_list *));
	if (base->activequeues == NULL)
		event_err(1, "%s: calloc", __func__);

	for (i = 0; i < base->nactivequeues; ++i) {
		base->activequeues[i] = malloc(sizeof(struct event_list));
		if (base->activequeues[i] == NULL)
			event_err(1, "%s: malloc", __func__);
		TAILQ_INIT(base->activequeues[i]);

event_haveevents(struct event_base *base)
	return (base->event_count > 0);
/*
 * Active events are stored in priority queues.  Lower priorities are always
 * processed before higher priorities.  Low priority events can starve high
 * priority ones.
 */
event_process_active(struct event_base *base)
	struct event_list *activeq = NULL;

	for (i = 0; i < base->nactivequeues; ++i) {
		if (TAILQ_FIRST(base->activequeues[i]) != NULL) {
			activeq = base->activequeues[i];

	assert(activeq != NULL);

	for (ev = TAILQ_FIRST(activeq); ev; ev = TAILQ_FIRST(activeq)) {
		if (ev->ev_events & EV_PERSIST)
			event_queue_remove(base, ev, EVLIST_ACTIVE);

		/* Allows deletes to work */
		ncalls = ev->ev_ncalls;
		ev->ev_pncalls = &ncalls;
			ev->ev_ncalls = ncalls;
			(*ev->ev_callback)((int)ev->ev_fd, ev->ev_res, ev->ev_arg);
			if (base->event_break)
/*
 * Wait continuously for events.  We exit only if no events are left.
 */
	return (event_loop(0));

event_base_dispatch(struct event_base *event_base)
	return (event_base_loop(event_base, 0));

event_base_get_method(struct event_base *base)
	return (base->evsel->name);
event_loopexit_cb(int fd, short what, void *arg)
	struct event_base *base = arg;
	base->event_gotterm = 1;

/* not thread safe */
event_loopexit(const struct timeval *tv)
	return (event_once(-1, EV_TIMEOUT, event_loopexit_cb,

event_base_loopexit(struct event_base *event_base, const struct timeval *tv)
	return (event_base_once(event_base, -1, EV_TIMEOUT, event_loopexit_cb,

/* not thread safe */
event_loopbreak(void)
	return (event_base_loopbreak(current_base));

event_base_loopbreak(struct event_base *event_base)
	if (event_base == NULL)

	event_base->event_break = 1;
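/*
 * Usage sketch for the two termination paths above (base is assumed to be an
 * event_base the caller already owns).  loopexit schedules the exit through
 * the normal one-shot event machinery; loopbreak takes effect as soon as the
 * currently running callback returns.
 *
 *	struct timeval two_sec = { 2, 0 };
 *	event_base_loopexit(base, &two_sec);	-- leave the loop ~2s from now
 *	event_base_loopbreak(base);		-- leave the loop immediately
 */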
/* not thread safe */
event_loop(int flags)
	return event_base_loop(current_base, flags);

event_base_loop(struct event_base *base, int flags)
	const struct eventop *evsel = base->evsel;
	void *evbase = base->evbase;
	struct timeval *tv_p;

	/* clear time cache */
	base->tv_cache.tv_sec = 0;

	if (base->sig.ev_signal_added)
		evsignal_base = base;

		/* Terminate the loop if we have been asked to */
		if (base->event_gotterm) {
			base->event_gotterm = 0;

		if (base->event_break) {
			base->event_break = 0;

		timeout_correct(base, &tv);

		if (!base->event_count_active && !(flags & EVLOOP_NONBLOCK)) {
			timeout_next(base, &tv_p);
			/*
			 * if we have active events, we just poll new events
			 */
			evutil_timerclear(&tv);

		/* If we have no events, we just exit */
		if (!event_haveevents(base)) {
			event_debug(("%s: no events registered.", __func__));

		/* update last old time */
		gettime(base, &base->event_tv);

		/* clear time cache */
		base->tv_cache.tv_sec = 0;

		res = evsel->dispatch(base, evbase, tv_p);

		gettime(base, &base->tv_cache);

		timeout_process(base);

		if (base->event_count_active) {
			event_process_active(base);
			if (!base->event_count_active && (flags & EVLOOP_ONCE))
		} else if (flags & EVLOOP_NONBLOCK)

	/* clear time cache */
	base->tv_cache.tv_sec = 0;

	event_debug(("%s: asked to terminate loop.", __func__));
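/*
 * Sketch of how the flags handled above are used by a caller (base assumed
 * to be initialized; both flags are part of the public API):
 *
 *	event_base_loop(base, 0);		-- run until no events remain
 *	event_base_loop(base, EVLOOP_ONCE);	-- block once, run the active
 *						   events, then return
 *	event_base_loop(base, EVLOOP_NONBLOCK);	-- poll without blocking
 */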
/* Sets up an event for processing once */
	void (*cb)(int, short, void *);

/* One-time callback, it deletes itself */
event_once_cb(int fd, short events, void *arg)
	struct event_once *eonce = arg;

	(*eonce->cb)(fd, events, eonce->arg);

/* not threadsafe, event scheduled once. */
event_once(int fd, short events,
    void (*callback)(int, short, void *), void *arg, const struct timeval *tv)
	return event_base_once(current_base, fd, events, callback, arg, tv);

/* Schedules an event once */
event_base_once(struct event_base *base, int fd, short events,
    void (*callback)(int, short, void *), void *arg, const struct timeval *tv)
	struct event_once *eonce;

	/* We cannot support signals that just fire once */
	if (events & EV_SIGNAL)

	if ((eonce = calloc(1, sizeof(struct event_once))) == NULL)

	eonce->cb = callback;

	if (events == EV_TIMEOUT) {
			evutil_timerclear(&etv);

		evtimer_set(&eonce->ev, event_once_cb, eonce);
	} else if (events & (EV_READ|EV_WRITE)) {
		events &= EV_READ|EV_WRITE;

		event_set(&eonce->ev, fd, events, event_once_cb, eonce);
		/* Bad event combination */

	res = event_base_set(base, &eonce->ev);
		res = event_add(&eonce->ev, tv);
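/*
 * Sketch of typical event_base_once() calls (base, fd and one_shot_cb are
 * assumed to be provided by the caller; the struct event_once allocated
 * above is freed by event_once_cb after the callback has run):
 *
 *	struct timeval five_sec = { 5, 0 };
 *	event_base_once(base, -1, EV_TIMEOUT, one_shot_cb, NULL, &five_sec);
 *	event_base_once(base, fd, EV_READ, one_shot_cb, NULL, NULL);
 */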
event_set(struct event *ev, int fd, short events,
    void (*callback)(int, short, void *), void *arg)
	/* Take the current base - caller needs to set the real base later */
	ev->ev_base = current_base;

	ev->ev_callback = callback;
	ev->ev_events = events;
	ev->ev_flags = EVLIST_INIT;
	ev->ev_pncalls = NULL;
	min_heap_elem_init(ev);

	/* by default, we put new events into the middle priority */
		ev->ev_pri = current_base->nactivequeues/2;

event_base_set(struct event_base *base, struct event *ev)
	/* Only innocent events may be assigned to a different base */
	if (ev->ev_flags != EVLIST_INIT)

	ev->ev_pri = base->nactivequeues/2;
/*
 * Sets the priority of an event - if an event is already scheduled,
 * changing the priority is going to fail.
 */
event_priority_set(struct event *ev, int pri)
	if (ev->ev_flags & EVLIST_ACTIVE)

	if (pri < 0 || pri >= ev->ev_base->nactivequeues)
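/*
 * Usage sketch for the priority machinery (base, fd and read_cb assumed to
 * exist in the caller; lower numbers run first, and the priority must be
 * changed before the event becomes active):
 *
 *	struct event ev;
 *	event_base_priority_init(base, 3);
 *	event_set(&ev, fd, EV_READ | EV_PERSIST, read_cb, NULL);
 *	event_base_set(base, &ev);
 *	event_priority_set(&ev, 0);
 *	event_add(&ev, NULL);
 */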
/*
 * Checks if a specific event is pending or scheduled.
 */
event_pending(struct event *ev, short event, struct timeval *tv)
	struct timeval now, res;

	if (ev->ev_flags & EVLIST_INSERTED)
		flags |= (ev->ev_events & (EV_READ|EV_WRITE|EV_SIGNAL));
	if (ev->ev_flags & EVLIST_ACTIVE)
	if (ev->ev_flags & EVLIST_TIMEOUT)

	event &= (EV_TIMEOUT|EV_READ|EV_WRITE|EV_SIGNAL);

	/* See if there is a timeout that we should report */
	if (tv != NULL && (flags & event & EV_TIMEOUT)) {
		gettime(ev->ev_base, &now);
		evutil_timersub(&ev->ev_timeout, &now, &res);
		/* correctly remap to real time */
		evutil_gettimeofday(&now, NULL);
		evutil_timeradd(&now, &res, tv);

	return (flags & event);
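/*
 * Sketch: query an event the caller set up earlier.  The returned bits are
 * the subset of the requested bits that are actually pending; when
 * EV_TIMEOUT is both requested and pending, *tv is filled with the absolute
 * wall-clock expiration time computed above.
 *
 *	struct timeval expires;
 *	if (event_pending(&ev, EV_READ | EV_TIMEOUT, &expires))
 *		...
 */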
event_add(struct event *ev, const struct timeval *tv)
	struct event_base *base = ev->ev_base;
	const struct eventop *evsel = base->evsel;
	void *evbase = base->evbase;

	    "event_add: event: %p, %s%s%scall %p",
	    ev->ev_events & EV_READ ? "EV_READ " : " ",
	    ev->ev_events & EV_WRITE ? "EV_WRITE " : " ",
	    tv ? "EV_TIMEOUT " : " ",

	assert(!(ev->ev_flags & ~EVLIST_ALL));

	/*
	 * prepare for timeout insertion further below, if we get a
	 * failure on any step, we should not change any state.
	 */
	if (tv != NULL && !(ev->ev_flags & EVLIST_TIMEOUT)) {
		if (min_heap_reserve(&base->timeheap,
		    1 + min_heap_size(&base->timeheap)) == -1)
			return (-1);	/* ENOMEM == errno */

	if ((ev->ev_events & (EV_READ|EV_WRITE|EV_SIGNAL)) &&
	    !(ev->ev_flags & (EVLIST_INSERTED|EVLIST_ACTIVE))) {
		res = evsel->add(evbase, ev);
			event_queue_insert(base, ev, EVLIST_INSERTED);
	/*
	 * we should change the timeout state only if the previous event
	 * addition succeeded.
	 */
	if (res != -1 && tv != NULL) {
		/*
		 * we already reserved memory above for the case where we
		 * are not replacing an existing timeout.
		 */
		if (ev->ev_flags & EVLIST_TIMEOUT)
			event_queue_remove(base, ev, EVLIST_TIMEOUT);

		/* Check if it is active due to a timeout.  Rescheduling
		 * this timeout before the callback can be executed
		 * removes it from the active list. */
		if ((ev->ev_flags & EVLIST_ACTIVE) &&
		    (ev->ev_res & EV_TIMEOUT)) {
			/* See if we are just active executing this
			 * event in a loop */
			if (ev->ev_ncalls && ev->ev_pncalls) {

			event_queue_remove(base, ev, EVLIST_ACTIVE);

		evutil_timeradd(&now, tv, &ev->ev_timeout);

		    "event_add: timeout in %ld seconds, call %p",
		    tv->tv_sec, ev->ev_callback));

		event_queue_insert(base, ev, EVLIST_TIMEOUT);
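/*
 * Sketch of the common event_add() patterns handled above (fd and read_cb
 * come from the caller; ev must have been prepared with event_set() and
 * event_base_set() first).  Calling event_add() again with a timeval simply
 * reschedules the timeout, as the code above shows.
 *
 *	struct timeval five_sec = { 5, 0 };
 *	event_add(&ev, NULL);		-- I/O only, no timeout
 *	event_add(&ev, &five_sec);	-- I/O plus a five second timeout
 */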
event_del(struct event *ev)
	struct event_base *base;

	event_debug(("event_del: %p, callback %p",
	    ev, ev->ev_callback));

	/* An event without a base has not been added */
	if (ev->ev_base == NULL)

	assert(!(ev->ev_flags & ~EVLIST_ALL));

	/* See if we are just active executing this event in a loop */
	if (ev->ev_ncalls && ev->ev_pncalls) {

	if (ev->ev_flags & EVLIST_TIMEOUT)
		event_queue_remove(base, ev, EVLIST_TIMEOUT);

	if (ev->ev_flags & EVLIST_ACTIVE)
		event_queue_remove(base, ev, EVLIST_ACTIVE);

	if (ev->ev_flags & EVLIST_INSERTED) {
		event_queue_remove(base, ev, EVLIST_INSERTED);
		return (base->evsel->del(base->evbase, ev));
event_active(struct event *ev, int res, short ncalls)
	/* We get different kinds of events, add them together */
	if (ev->ev_flags & EVLIST_ACTIVE) {

	ev->ev_ncalls = ncalls;
	ev->ev_pncalls = NULL;
	event_queue_insert(ev->ev_base, ev, EVLIST_ACTIVE);
timeout_next(struct event_base *base, struct timeval **tv_p)
	struct timeval *tv = *tv_p;

	if ((ev = min_heap_top(&base->timeheap)) == NULL) {
		/* if no time-based events are active wait for I/O */

	if (gettime(base, &now) == -1)

	if (evutil_timercmp(&ev->ev_timeout, &now, <=)) {
		evutil_timerclear(tv);

	evutil_timersub(&ev->ev_timeout, &now, tv);

	assert(tv->tv_sec >= 0);
	assert(tv->tv_usec >= 0);

	event_debug(("timeout_next: in %ld seconds", tv->tv_sec));
/*
 * Determines if the time is running backwards by comparing the current
 * time against the last time we checked.  Not needed when using clock
 * monotonic.
 */
timeout_correct(struct event_base *base, struct timeval *tv)
	/* Check if time is running backwards */
	if (evutil_timercmp(tv, &base->event_tv, >=)) {
		base->event_tv = *tv;

	event_debug(("%s: time is running backwards, corrected",

	evutil_timersub(&base->event_tv, tv, &off);
	/*
	 * We can modify the key element of the node without destroying
	 * the key, because we apply it to all in the right order.
	 */
	pev = base->timeheap.p;
	size = base->timeheap.n;
	for (; size-- > 0; ++pev) {
		struct timeval *ev_tv = &(**pev).ev_timeout;
		evutil_timersub(ev_tv, &off, ev_tv);

	/* Now remember what the new time turned out to be. */
	base->event_tv = *tv;
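/*
 * Worked example of the correction above: if the last observed time
 * (base->event_tv) was 100.0s and gettime() now reports 95.0s, the clock has
 * jumped back by off = 5.0s, so every pending ev_timeout in the heap is
 * shifted 5.0s earlier and event_tv is reset to 95.0s.  Relative timeouts
 * therefore still fire after their intended interval.
 */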
timeout_process(struct event_base *base)
	if (min_heap_empty(&base->timeheap))

	while ((ev = min_heap_top(&base->timeheap))) {
		if (evutil_timercmp(&ev->ev_timeout, &now, >))

		/* delete this event from the I/O queues */

		event_debug(("timeout_process: call %p",

		event_active(ev, EV_TIMEOUT, 1);
event_queue_remove(struct event_base *base, struct event *ev, int queue)
	if (!(ev->ev_flags & queue))
		event_errx(1, "%s: %p(fd %d) not on queue %x", __func__,
		    ev, ev->ev_fd, queue);

	if (~ev->ev_flags & EVLIST_INTERNAL)

	ev->ev_flags &= ~queue;
	case EVLIST_INSERTED:
		TAILQ_REMOVE(&base->eventqueue, ev, ev_next);
		base->event_count_active--;
		TAILQ_REMOVE(base->activequeues[ev->ev_pri],
		min_heap_erase(&base->timeheap, ev);
		event_errx(1, "%s: unknown queue %x", __func__, queue);
event_queue_insert(struct event_base *base, struct event *ev, int queue)
	if (ev->ev_flags & queue) {
		/* Double insertion is possible for active events */
		if (queue & EVLIST_ACTIVE)

		event_errx(1, "%s: %p(fd %d) already on queue %x", __func__,
		    ev, ev->ev_fd, queue);

	if (~ev->ev_flags & EVLIST_INTERNAL)

	ev->ev_flags |= queue;
	case EVLIST_INSERTED:
		TAILQ_INSERT_TAIL(&base->eventqueue, ev, ev_next);
		base->event_count_active++;
		TAILQ_INSERT_TAIL(base->activequeues[ev->ev_pri],
	case EVLIST_TIMEOUT: {
		min_heap_push(&base->timeheap, ev);
		event_errx(1, "%s: unknown queue %x", __func__, queue);
/* Functions for debugging */

event_get_version(void)

/*
 * No thread-safe interface needed - the information should be the same
 */
event_get_method(void)
	return (current_base->evsel->name);