/*
 * Copyright (c) 2000-2004 Niels Provos <provos@citi.umich.edu>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#ifdef WIN32
#define WIN32_LEAN_AND_MEAN
#include <windows.h>
#undef WIN32_LEAN_AND_MEAN
#endif

#include <sys/types.h>
#ifdef HAVE_SYS_TIME_H
#include <sys/time.h>
#else
#include <sys/_libevent_time.h>
#endif
#include <sys/queue.h>
#include <stdlib.h>
#include <assert.h>

#include "event.h"
#include "event-internal.h"
#include "evutil.h"
#include "log.h"
#ifdef HAVE_EVENT_PORTS
extern const struct eventop evportops;
#endif
#ifdef HAVE_SELECT
extern const struct eventop selectops;
#endif
#ifdef HAVE_POLL
extern const struct eventop pollops;
#endif
#ifdef HAVE_EPOLL
extern const struct eventop epollops;
#endif
#ifdef HAVE_WORKING_KQUEUE
extern const struct eventop kqops;
#endif
#ifdef HAVE_DEVPOLL
extern const struct eventop devpollops;
#endif
#ifdef WIN32
extern const struct eventop win32ops;
#endif
/* In order of preference */
static const struct eventop *eventops[] = {
#ifdef HAVE_EVENT_PORTS
	&evportops,
#endif
#ifdef HAVE_WORKING_KQUEUE
	&kqops,
#endif
	/* ... remaining backends (epoll, devpoll, poll, select, win32) ... */
	NULL
};
/* Global state */
struct event_base *current_base = NULL;
extern struct event_base *evsignal_base;
static int use_monotonic;
/* Prototypes */
static void	event_queue_insert(struct event_base *, struct event *, int);
static void	event_queue_remove(struct event_base *, struct event *, int);
static int	event_haveevents(struct event_base *);

static void	event_process_active(struct event_base *);

static int	timeout_next(struct event_base *, struct timeval **);
static void	timeout_process(struct event_base *);
static void	timeout_correct(struct event_base *, struct timeval *);
static void
detect_monotonic(void)
{
#if defined(HAVE_CLOCK_GETTIME) && defined(CLOCK_MONOTONIC)
	struct timespec	ts;

	if (clock_gettime(CLOCK_MONOTONIC, &ts) == 0)
		use_monotonic = 1;
#endif
}
static int
gettime(struct event_base *base, struct timeval *tp)
{
	if (base->tv_cache.tv_sec) {
		*tp = base->tv_cache;
		return (0);
	}

#if defined(HAVE_CLOCK_GETTIME) && defined(CLOCK_MONOTONIC)
	if (use_monotonic) {
		struct timespec	ts;

		if (clock_gettime(CLOCK_MONOTONIC, &ts) == -1)
			return (-1);

		tp->tv_sec = ts.tv_sec;
		tp->tv_usec = ts.tv_nsec / 1000;
		return (0);
	}
#endif

	return (evutil_gettimeofday(tp, NULL));
}
struct event_base *
event_init(void)
{
	struct event_base *base = event_base_new();

	if (base != NULL)
		current_base = base;

	return (base);
}
struct event_base *
event_base_new(void)
{
	int i;
	struct event_base *base;

	if ((base = calloc(1, sizeof(struct event_base))) == NULL)
		event_err(1, "%s: calloc", __func__);

	detect_monotonic();
	gettime(base, &base->event_tv);

	min_heap_ctor(&base->timeheap);
	TAILQ_INIT(&base->eventqueue);
	base->sig.ev_signal_pair[0] = -1;
	base->sig.ev_signal_pair[1] = -1;

	base->evbase = NULL;
	for (i = 0; eventops[i] && !base->evbase; i++) {
		base->evsel = eventops[i];

		base->evbase = base->evsel->init(base);
	}

	if (base->evbase == NULL)
		event_errx(1, "%s: no event mechanism available", __func__);

	if (evutil_getenv("EVENT_SHOW_METHOD"))
		event_msgx("libevent using: %s\n",
			   base->evsel->name);

	/* allocate a single active event queue */
	event_base_priority_init(base, 1);

	return (base);
}
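
/*
 * Usage sketch (illustrative, not part of the original file): the
 * preference list above picks the first backend whose init() succeeds;
 * setting EVENT_SHOW_METHOD in the environment logs the choice.  The
 * printf() reporting line is hypothetical.
 *
 *	struct event_base *base = event_base_new();
 *	printf("libevent backend: %s\n", event_base_get_method(base));
 */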
void
event_base_free(struct event_base *base)
{
	int i, n_deleted = 0;
	struct event *ev;

	if (base == NULL && current_base)
		base = current_base;
	if (base == current_base)
		current_base = NULL;

	/* XXX(niels) - check for internal events first */
	assert(base);
	/* Delete all non-internal events. */
	for (ev = TAILQ_FIRST(&base->eventqueue); ev; ) {
		struct event *next = TAILQ_NEXT(ev, ev_next);
		if (!(ev->ev_flags & EVLIST_INTERNAL)) {
			event_del(ev);
			++n_deleted;
		}
		ev = next;
	}
	while ((ev = min_heap_top(&base->timeheap)) != NULL) {
		event_del(ev);
		++n_deleted;
	}

	for (i = 0; i < base->nactivequeues; ++i) {
		for (ev = TAILQ_FIRST(base->activequeues[i]); ev; ) {
			struct event *next = TAILQ_NEXT(ev, ev_active_next);
			if (!(ev->ev_flags & EVLIST_INTERNAL)) {
				event_del(ev);
				++n_deleted;
			}
			ev = next;
		}
	}

	if (n_deleted)
		event_debug(("%s: %d events were still set in base",
			__func__, n_deleted));

	if (base->evsel->dealloc != NULL)
		base->evsel->dealloc(base, base->evbase);

	for (i = 0; i < base->nactivequeues; ++i)
		assert(TAILQ_EMPTY(base->activequeues[i]));

	assert(min_heap_empty(&base->timeheap));
	min_heap_dtor(&base->timeheap);

	for (i = 0; i < base->nactivequeues; ++i)
		free(base->activequeues[i]);
	free(base->activequeues);

	assert(TAILQ_EMPTY(&base->eventqueue));

	free(base);
}
/* reinitialize the event base after a fork */
int
event_reinit(struct event_base *base)
{
	const struct eventop *evsel = base->evsel;
	void *evbase = base->evbase;
	int res = 0;
	struct event *ev;

	/* check if this event mechanism requires reinit */
	if (!evsel->need_reinit)
		return (0);

	/* prevent internal delete */
	if (base->sig.ev_signal_added) {
		/* we cannot call event_del here because the base has
		 * not been reinitialized yet. */
		event_queue_remove(base, &base->sig.ev_signal,
		    EVLIST_INSERTED);
		if (base->sig.ev_signal.ev_flags & EVLIST_ACTIVE)
			event_queue_remove(base, &base->sig.ev_signal,
			    EVLIST_ACTIVE);
		base->sig.ev_signal_added = 0;
	}

	if (base->evsel->dealloc != NULL)
		base->evsel->dealloc(base, base->evbase);
	evbase = base->evbase = evsel->init(base);
	if (base->evbase == NULL)
		event_errx(1, "%s: could not reinitialize event mechanism",
		    __func__);

	TAILQ_FOREACH(ev, &base->eventqueue, ev_next) {
		if (evsel->add(evbase, ev) == -1)
			res = -1;
	}

	return (res);
}
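
/*
 * Usage sketch (illustrative): a backend that sets need_reinit (kqueue,
 * for example) must be rebuilt in the child after fork() before the
 * inherited base is used again.
 *
 *	if (fork() == 0) {
 *		event_reinit(base);
 *		event_base_dispatch(base);
 *	}
 */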
int
event_priority_init(int npriorities)
{
	return event_base_priority_init(current_base, npriorities);
}
int
event_base_priority_init(struct event_base *base, int npriorities)
{
	int i;

	if (base->event_count_active)
		return (-1);

	if (base->nactivequeues && npriorities != base->nactivequeues) {
		for (i = 0; i < base->nactivequeues; ++i) {
			free(base->activequeues[i]);
		}
		free(base->activequeues);
	}

	/* Allocate our priority queues */
	base->nactivequeues = npriorities;
	base->activequeues = (struct event_list **)
	    calloc(base->nactivequeues, sizeof(struct event_list *));
	if (base->activequeues == NULL)
		event_err(1, "%s: calloc", __func__);

	for (i = 0; i < base->nactivequeues; ++i) {
		base->activequeues[i] = malloc(sizeof(struct event_list));
		if (base->activequeues[i] == NULL)
			event_err(1, "%s: malloc", __func__);
		TAILQ_INIT(base->activequeues[i]);
	}

	return (0);
}
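
/*
 * Usage sketch (illustrative): give a base three priority levels;
 * lower numbers run first.  "ev" stands for a hypothetical,
 * already-set event.
 *
 *	event_base_priority_init(base, 3);
 *	event_base_set(base, &ev);
 *	event_priority_set(&ev, 0);	(the most urgent queue)
 */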
static int
event_haveevents(struct event_base *base)
{
	return (base->event_count > 0);
}
/*
 * Active events are stored in priority queues.  Lower priorities are always
 * processed before higher priorities.  Low priority events can starve high
 * priority ones.
 */

static void
event_process_active(struct event_base *base)
{
	struct event *ev;
	struct event_list *activeq = NULL;
	int i;
	short ncalls;

	for (i = 0; i < base->nactivequeues; ++i) {
		if (TAILQ_FIRST(base->activequeues[i]) != NULL) {
			activeq = base->activequeues[i];
			break;
		}
	}

	assert(activeq != NULL);

	for (ev = TAILQ_FIRST(activeq); ev; ev = TAILQ_FIRST(activeq)) {
		if (ev->ev_events & EV_PERSIST)
			event_queue_remove(base, ev, EVLIST_ACTIVE);
		else
			event_del(ev);

		/* Allows deletes to work */
		ncalls = ev->ev_ncalls;
		ev->ev_pncalls = &ncalls;
		while (ncalls) {
			ncalls--;
			ev->ev_ncalls = ncalls;
			(*ev->ev_callback)((int)ev->ev_fd, ev->ev_res, ev->ev_arg);
			if (base->event_break)
				return;
		}
	}
}
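
/*
 * Usage sketch (illustrative): as the loop above shows, a persistent
 * event stays registered across callbacks, while a non-persistent one
 * is deleted before its callback runs.  "on_read" and "fd" are
 * hypothetical.
 *
 *	event_set(&ev, fd, EV_READ|EV_PERSIST, on_read, NULL);
 *	event_add(&ev, NULL);	(fires repeatedly until event_del())
 */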
/*
 * Wait continuously for events.  We exit only if no events are left.
 */

int
event_dispatch(void)
{
	return (event_loop(0));
}

int
event_base_dispatch(struct event_base *event_base)
{
	return (event_base_loop(event_base, 0));
}

const char *
event_base_get_method(struct event_base *base)
{
	assert(base);
	return (base->evsel->name);
}
static void
event_loopexit_cb(int fd, short what, void *arg)
{
	struct event_base *base = arg;
	base->event_gotterm = 1;
}

/* not thread safe */
int
event_loopexit(const struct timeval *tv)
{
	return (event_once(-1, EV_TIMEOUT, event_loopexit_cb,
		    current_base, tv));
}

int
event_base_loopexit(struct event_base *event_base, const struct timeval *tv)
{
	return (event_base_once(event_base, -1, EV_TIMEOUT, event_loopexit_cb,
		    event_base, tv));
}

/* not thread safe */
int
event_loopbreak(void)
{
	return (event_base_loopbreak(current_base));
}
int
event_base_loopbreak(struct event_base *event_base)
{
	if (event_base == NULL)
		return (-1);

	event_base->event_break = 1;
	return (0);
}
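
/*
 * Usage sketch (illustrative): a delayed exit versus an immediate
 * break from inside a callback.
 *
 *	struct timeval tv = { 2, 0 };
 *	event_base_loopexit(base, &tv);	(exit the loop in two seconds)
 *
 *	event_base_loopbreak(base);	(stop after the current callback)
 */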
/* not thread safe */
int
event_loop(int flags)
{
	return event_base_loop(current_base, flags);
}

int
event_base_loop(struct event_base *base, int flags)
{
	const struct eventop *evsel = base->evsel;
	void *evbase = base->evbase;
	struct timeval tv;
	struct timeval *tv_p;
	int res, done;

	/* clear time cache */
	base->tv_cache.tv_sec = 0;

	if (base->sig.ev_signal_added)
		evsignal_base = base;
	done = 0;
	while (!done) {
		/* Terminate the loop if we have been asked to */
		if (base->event_gotterm) {
			base->event_gotterm = 0;
			break;
		}

		if (base->event_break) {
			base->event_break = 0;
			break;
		}

		timeout_correct(base, &tv);

		tv_p = &tv;
		if (!base->event_count_active && !(flags & EVLOOP_NONBLOCK)) {
			timeout_next(base, &tv_p);
		} else {
			/*
			 * if we have active events, we just poll new events
			 * without waiting.
			 */
			evutil_timerclear(&tv);
		}

		/* If we have no events, we just exit */
		if (!event_haveevents(base)) {
			event_debug(("%s: no events registered.", __func__));
			return (1);
		}

		/* update last old time */
		gettime(base, &base->event_tv);

		/* clear time cache */
		base->tv_cache.tv_sec = 0;

		res = evsel->dispatch(base, evbase, tv_p);

		if (res == -1)
			return (-1);
		gettime(base, &base->tv_cache);

		timeout_process(base);

		if (base->event_count_active) {
			event_process_active(base);
			if (!base->event_count_active && (flags & EVLOOP_ONCE))
				done = 1;
		} else if (flags & EVLOOP_NONBLOCK)
			done = 1;
	}

	/* clear time cache */
	base->tv_cache.tv_sec = 0;

	event_debug(("%s: asked to terminate loop.", __func__));
	return (0);
}
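
/*
 * Usage sketch (illustrative): the canonical blocking loop, and a
 * single non-blocking pass over whatever is ready right now.
 *
 *	event_base_dispatch(base);
 *	event_base_loop(base, EVLOOP_NONBLOCK);
 */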
/* Sets up an event for processing once */

struct event_once {
	struct event ev;

	void (*cb)(int, short, void *);
	void *arg;
};

/* One-time callback, it deletes itself */

static void
event_once_cb(int fd, short events, void *arg)
{
	struct event_once *eonce = arg;

	(*eonce->cb)(fd, events, eonce->arg);
	free(eonce);
}
/* not threadsafe, event scheduled once. */
int
event_once(int fd, short events,
    void (*callback)(int, short, void *), void *arg, const struct timeval *tv)
{
	return event_base_once(current_base, fd, events, callback, arg, tv);
}
/* Schedules an event once */
int
event_base_once(struct event_base *base, int fd, short events,
    void (*callback)(int, short, void *), void *arg, const struct timeval *tv)
{
	struct event_once *eonce;
	struct timeval etv;
	int res;

	/* We cannot support signals that just fire once */
	if (events & EV_SIGNAL)
		return (-1);

	if ((eonce = calloc(1, sizeof(struct event_once))) == NULL)
		return (-1);

	eonce->cb = callback;
	eonce->arg = arg;

	if (events == EV_TIMEOUT) {
		if (tv == NULL) {
			evutil_timerclear(&etv);
			tv = &etv;
		}

		evtimer_set(&eonce->ev, event_once_cb, eonce);
	} else if (events & (EV_READ|EV_WRITE)) {
		events &= EV_READ|EV_WRITE;

		event_set(&eonce->ev, fd, events, event_once_cb, eonce);
	} else {
		/* Bad event combination */
		free(eonce);
		return (-1);
	}

	res = event_base_set(base, &eonce->ev);
	if (res == 0)
		res = event_add(&eonce->ev, tv);
	if (res != 0) {
		free(eonce);
		return (res);
	}

	return (0);
}
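
/*
 * Usage sketch (illustrative): fire a hypothetical callback once,
 * five seconds from now; the event_once bookkeeping frees itself.
 *
 *	static void on_timeout(int fd, short events, void *arg)
 *	{
 *		puts("timer fired");
 *	}
 *
 *	struct timeval tv = { 5, 0 };
 *	event_base_once(base, -1, EV_TIMEOUT, on_timeout, NULL, &tv);
 */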
void
event_set(struct event *ev, int fd, short events,
    void (*callback)(int, short, void *), void *arg)
{
	/* Take the current base - caller needs to set the real base later */
	ev->ev_base = current_base;

	ev->ev_callback = callback;
	ev->ev_arg = arg;
	ev->ev_fd = fd;
	ev->ev_events = events;
	ev->ev_res = 0;
	ev->ev_flags = EVLIST_INIT;
	ev->ev_ncalls = 0;
	ev->ev_pncalls = NULL;

	min_heap_elem_init(ev);

	/* by default, we put new events into the middle priority */
	if (current_base)
		ev->ev_pri = current_base->nactivequeues/2;
}
int
event_base_set(struct event_base *base, struct event *ev)
{
	/* Only innocent events may be assigned to a different base */
	if (ev->ev_flags != EVLIST_INIT)
		return (-1);

	ev->ev_base = base;
	ev->ev_pri = base->nactivequeues/2;

	return (0);
}
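
/*
 * Usage sketch (illustrative): the canonical setup order - set the
 * event, reassign the base while the event is still "innocent", then
 * add it.  "on_read" and "fd" are hypothetical.
 *
 *	struct event ev;
 *	event_set(&ev, fd, EV_READ|EV_PERSIST, on_read, NULL);
 *	event_base_set(base, &ev);
 *	event_add(&ev, NULL);
 */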
/*
 * Sets the priority of an event - if an event is already scheduled
 * changing the priority is going to fail.
 */

int
event_priority_set(struct event *ev, int pri)
{
	if (ev->ev_flags & EVLIST_ACTIVE)
		return (-1);
	if (pri < 0 || pri >= ev->ev_base->nactivequeues)
		return (-1);

	ev->ev_pri = pri;

	return (0);
}
/*
 * Checks if a specific event is pending or scheduled.
 */

int
event_pending(struct event *ev, short event, struct timeval *tv)
{
	struct timeval	now, res;
	int flags = 0;

	if (ev->ev_flags & EVLIST_INSERTED)
		flags |= (ev->ev_events & (EV_READ|EV_WRITE|EV_SIGNAL));
	if (ev->ev_flags & EVLIST_ACTIVE)
		flags |= ev->ev_res;
	if (ev->ev_flags & EVLIST_TIMEOUT)
		flags |= EV_TIMEOUT;

	event &= (EV_TIMEOUT|EV_READ|EV_WRITE|EV_SIGNAL);

	/* See if there is a timeout that we should report */
	if (tv != NULL && (flags & event & EV_TIMEOUT)) {
		gettime(ev->ev_base, &now);
		evutil_timersub(&ev->ev_timeout, &now, &res);
		/* correctly remap to real time */
		evutil_gettimeofday(&now, NULL);
		evutil_timeradd(&now, &res, tv);
	}

	return (flags & event);
}
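
/*
 * Usage sketch (illustrative): ask whether the hypothetical event "ev"
 * still has a scheduled timeout, and when it will fire in wall-clock
 * time.
 *
 *	struct timeval when;
 *	if (event_pending(&ev, EV_TIMEOUT, &when))
 *		printf("fires at %ld\n", (long)when.tv_sec);
 */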
int
event_add(struct event *ev, const struct timeval *tv)
{
	struct event_base *base = ev->ev_base;
	const struct eventop *evsel = base->evsel;
	void *evbase = base->evbase;
	int res = 0;

	event_debug((
		 "event_add: event: %p, %s%s%scall %p",
		 ev,
		 ev->ev_events & EV_READ ? "EV_READ " : " ",
		 ev->ev_events & EV_WRITE ? "EV_WRITE " : " ",
		 tv ? "EV_TIMEOUT " : " ",
		 ev->ev_callback));

	assert(!(ev->ev_flags & ~EVLIST_ALL));

	/*
	 * prepare for timeout insertion further below, if we get a
	 * failure on any step, we should not change any state.
	 */
	if (tv != NULL && !(ev->ev_flags & EVLIST_TIMEOUT)) {
		if (min_heap_reserve(&base->timeheap,
			1 + min_heap_size(&base->timeheap)) == -1)
			return (-1);  /* ENOMEM == errno */
	}

	if ((ev->ev_events & (EV_READ|EV_WRITE|EV_SIGNAL)) &&
	    !(ev->ev_flags & (EVLIST_INSERTED|EVLIST_ACTIVE))) {
		res = evsel->add(evbase, ev);
		if (res != -1)
			event_queue_insert(base, ev, EVLIST_INSERTED);
	}

	/*
	 * we should change the timeout state only if the previous event
	 * addition succeeded.
	 */
	if (res != -1 && tv != NULL) {
		struct timeval now;

		/*
		 * we already reserved memory above for the case where we
		 * are not replacing an existing timeout.
		 */
		if (ev->ev_flags & EVLIST_TIMEOUT)
			event_queue_remove(base, ev, EVLIST_TIMEOUT);

		/* Check if it is active due to a timeout.  Rescheduling
		 * this timeout before the callback can be executed
		 * removes it from the active list. */
		if ((ev->ev_flags & EVLIST_ACTIVE) &&
		    (ev->ev_res & EV_TIMEOUT)) {
			/* See if we are just active executing this
			 * event in a loop
			 */
			if (ev->ev_ncalls && ev->ev_pncalls) {
				/* Abort loop */
				*ev->ev_pncalls = 0;
			}

			event_queue_remove(base, ev, EVLIST_ACTIVE);
		}

		gettime(base, &now);
		evutil_timeradd(&now, tv, &ev->ev_timeout);

		event_debug((
			 "event_add: timeout in %ld seconds, call %p",
			 tv->tv_sec, ev->ev_callback));

		event_queue_insert(base, ev, EVLIST_TIMEOUT);
	}

	return (res);
}
int
event_del(struct event *ev)
{
	struct event_base *base;

	event_debug(("event_del: %p, callback %p",
		 ev, ev->ev_callback));

	/* An event without a base has not been added */
	if (ev->ev_base == NULL)
		return (-1);

	base = ev->ev_base;

	assert(!(ev->ev_flags & ~EVLIST_ALL));

	/* See if we are just active executing this event in a loop */
	if (ev->ev_ncalls && ev->ev_pncalls) {
		/* Abort loop */
		*ev->ev_pncalls = 0;
	}

	if (ev->ev_flags & EVLIST_TIMEOUT)
		event_queue_remove(base, ev, EVLIST_TIMEOUT);

	if (ev->ev_flags & EVLIST_ACTIVE)
		event_queue_remove(base, ev, EVLIST_ACTIVE);

	if (ev->ev_flags & EVLIST_INSERTED) {
		event_queue_remove(base, ev, EVLIST_INSERTED);
		return (base->evsel->del(base->evbase, ev));
	}

	return (0);
}
void
event_active(struct event *ev, int res, short ncalls)
{
	/* We get different kinds of events, add them together */
	if (ev->ev_flags & EVLIST_ACTIVE) {
		ev->ev_res |= res;
		return;
	}

	ev->ev_res = res;
	ev->ev_ncalls = ncalls;
	ev->ev_pncalls = NULL;
	event_queue_insert(ev->ev_base, ev, EVLIST_ACTIVE);
}
static int
timeout_next(struct event_base *base, struct timeval **tv_p)
{
	struct timeval now;
	struct event *ev;
	struct timeval *tv = *tv_p;

	if ((ev = min_heap_top(&base->timeheap)) == NULL) {
		/* if no time-based events are active wait for I/O */
		*tv_p = NULL;
		return (0);
	}

	if (gettime(base, &now) == -1)
		return (-1);

	if (evutil_timercmp(&ev->ev_timeout, &now, <=)) {
		evutil_timerclear(tv);
		return (0);
	}

	evutil_timersub(&ev->ev_timeout, &now, tv);

	assert(tv->tv_sec >= 0);
	assert(tv->tv_usec >= 0);

	event_debug(("timeout_next: in %ld seconds", tv->tv_sec));
	return (0);
}
/*
 * Determines if the time is running backwards by comparing the current
 * time against the last time we checked.  Not needed when using clock
 * monotonic.
 */

static void
timeout_correct(struct event_base *base, struct timeval *tv)
{
	struct event **pev;
	unsigned int size;
	struct timeval off;

	if (use_monotonic)
		return;

	/* Check if time is running backwards */
	gettime(base, tv);
	if (evutil_timercmp(tv, &base->event_tv, >=)) {
		base->event_tv = *tv;
		return;
	}

	event_debug(("%s: time is running backwards, corrected",
		    __func__));
	evutil_timersub(&base->event_tv, tv, &off);

	/*
	 * We can modify the key element of the node without destroying
	 * the key, because we apply it to all in the right order.
	 */
	pev = base->timeheap.p;
	size = base->timeheap.n;
	for (; size-- > 0; ++pev) {
		struct timeval *ev_tv = &(**pev).ev_timeout;
		evutil_timersub(ev_tv, &off, ev_tv);
	}
	/* Now remember what the new time turned out to be. */
	base->event_tv = *tv;
}
static void
timeout_process(struct event_base *base)
{
	struct timeval now;
	struct event *ev;

	if (min_heap_empty(&base->timeheap))
		return;

	gettime(base, &now);

	while ((ev = min_heap_top(&base->timeheap))) {
		if (evutil_timercmp(&ev->ev_timeout, &now, >))
			break;

		/* delete this event from the I/O queues */
		event_del(ev);

		event_debug(("timeout_process: call %p",
			 ev->ev_callback));
		event_active(ev, EV_TIMEOUT, 1);
	}
}
void
event_queue_remove(struct event_base *base, struct event *ev, int queue)
{
	if (!(ev->ev_flags & queue))
		event_errx(1, "%s: %p(fd %d) not on queue %x", __func__,
			   ev, ev->ev_fd, queue);

	if (~ev->ev_flags & EVLIST_INTERNAL)
		base->event_count--;

	ev->ev_flags &= ~queue;
	switch (queue) {
	case EVLIST_INSERTED:
		TAILQ_REMOVE(&base->eventqueue, ev, ev_next);
		break;
	case EVLIST_ACTIVE:
		base->event_count_active--;
		TAILQ_REMOVE(base->activequeues[ev->ev_pri],
		    ev, ev_active_next);
		break;
	case EVLIST_TIMEOUT:
		min_heap_erase(&base->timeheap, ev);
		break;
	default:
		event_errx(1, "%s: unknown queue %x", __func__, queue);
	}
}
void
event_queue_insert(struct event_base *base, struct event *ev, int queue)
{
	if (ev->ev_flags & queue) {
		/* Double insertion is possible for active events */
		if (queue & EVLIST_ACTIVE)
			return;

		event_errx(1, "%s: %p(fd %d) already on queue %x", __func__,
			   ev, ev->ev_fd, queue);
	}

	if (~ev->ev_flags & EVLIST_INTERNAL)
		base->event_count++;

	ev->ev_flags |= queue;
	switch (queue) {
	case EVLIST_INSERTED:
		TAILQ_INSERT_TAIL(&base->eventqueue, ev, ev_next);
		break;
	case EVLIST_ACTIVE:
		base->event_count_active++;
		TAILQ_INSERT_TAIL(base->activequeues[ev->ev_pri],
		    ev, ev_active_next);
		break;
	case EVLIST_TIMEOUT: {
		min_heap_push(&base->timeheap, ev);
		break;
	}
	default:
		event_errx(1, "%s: unknown queue %x", __func__, queue);
	}
}
/* Functions for debugging */

const char *
event_get_version(void)
{
	return (VERSION);
}

/*
 * No thread-safe interface needed - the information should be the same
 * for all threads.
 */

const char *
event_get_method(void)
{
	return (current_base->evsel->name);
}
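
/*
 * Usage sketch (illustrative): simple runtime diagnostics once
 * event_init() has established current_base.
 *
 *	event_init();
 *	printf("libevent %s using %s\n",
 *	    event_get_version(), event_get_method());
 */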