/*	$NetBSD: event.c,v 1.3 2015/01/29 07:26:02 spz Exp $	*/
/*
 * Copyright (c) 2000-2007 Niels Provos <provos@citi.umich.edu>
 * Copyright (c) 2007-2012 Niels Provos and Nick Mathewson
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include "event2/event-config.h"
#include <sys/cdefs.h>
__RCSID("$NetBSD: event.c,v 1.3 2015/01/29 07:26:02 spz Exp $");

#ifdef WIN32
#define WIN32_LEAN_AND_MEAN
#include <windows.h>
#undef WIN32_LEAN_AND_MEAN
#endif
#include <sys/types.h>
#if !defined(WIN32) && defined(_EVENT_HAVE_SYS_TIME_H)
#include <sys/time.h>
#endif
#include <sys/queue.h>
#ifdef _EVENT_HAVE_SYS_SOCKET_H
#include <sys/socket.h>
#endif
#ifdef _EVENT_HAVE_UNISTD_H
#include <unistd.h>
#endif
#ifdef _EVENT_HAVE_SYS_EVENTFD_H
#include <sys/eventfd.h>
#endif

#include "event2/event.h"
#include "event2/event_struct.h"
#include "event2/event_compat.h"
#include "event-internal.h"
#include "defer-internal.h"
#include "evthread-internal.h"
#include "event2/thread.h"
#include "event2/util.h"
#include "log-internal.h"
#include "evmap-internal.h"
#include "iocp-internal.h"
#include "changelist-internal.h"
#include "ht-internal.h"
#include "util-internal.h"
#ifdef _EVENT_HAVE_EVENT_PORTS
extern const struct eventop evportops;
#endif
#ifdef _EVENT_HAVE_SELECT
extern const struct eventop selectops;
#endif
#ifdef _EVENT_HAVE_POLL
extern const struct eventop pollops;
#endif
#ifdef _EVENT_HAVE_EPOLL
extern const struct eventop epollops;
#endif
#ifdef _EVENT_HAVE_WORKING_KQUEUE
extern const struct eventop kqops;
#endif
#ifdef _EVENT_HAVE_DEVPOLL
extern const struct eventop devpollops;
#endif
#ifdef WIN32
extern const struct eventop win32ops;
#endif

/* Array of backends in order of preference. */
static const struct eventop *eventops[] = {
#ifdef _EVENT_HAVE_EVENT_PORTS
	&evportops,
#endif
#ifdef _EVENT_HAVE_WORKING_KQUEUE
	&kqops,
#endif
#ifdef _EVENT_HAVE_EPOLL
	&epollops,
#endif
#ifdef _EVENT_HAVE_DEVPOLL
	&devpollops,
#endif
#ifdef _EVENT_HAVE_POLL
	&pollops,
#endif
#ifdef _EVENT_HAVE_SELECT
	&selectops,
#endif
#ifdef WIN32
	&win32ops,
#endif
	NULL
};

/* Global state; deprecated */
struct event_base *event_global_current_base_ = NULL;
#define current_base event_global_current_base_

/* Global state */

static int use_monotonic;
static inline int event_add_internal(struct event *ev,
    const struct timeval *tv, int tv_is_absolute);
static inline int event_del_internal(struct event *ev);

static void	event_queue_insert(struct event_base *, struct event *, int);
static void	event_queue_remove(struct event_base *, struct event *, int);
static int	event_haveevents(struct event_base *);

static int	event_process_active(struct event_base *);

static int	timeout_next(struct event_base *, struct timeval **);
static void	timeout_process(struct event_base *);
static void	timeout_correct(struct event_base *, struct timeval *);

static inline void	event_signal_closure(struct event_base *, struct event *ev);
static inline void	event_persist_closure(struct event_base *, struct event *ev);

static int	evthread_notify_base(struct event_base *base);
#ifndef _EVENT_DISABLE_DEBUG_MODE
/* These functions implement a hashtable of which 'struct event *' structures
 * have been setup or added.  We don't want to trust the content of the struct
 * event itself, since we're trying to work through cases where an event gets
 * clobbered or freed.  Instead, we keep a hashtable indexed by the pointer.
 */

struct event_debug_entry {
	HT_ENTRY(event_debug_entry) node;
	const struct event *ptr;
	unsigned added : 1;
};

static inline unsigned
hash_debug_entry(const struct event_debug_entry *e)
{
	/* We need to do this silliness to convince compilers that we
	 * honestly mean to cast e->ptr to an integer, and discard any
	 * part of it that doesn't fit in an unsigned.
	 */
	unsigned u = (unsigned) ((ev_uintptr_t) e->ptr);
	/* Our hashtable implementation is pretty sensitive to low bits,
	 * and every struct event is over 64 bytes in size, so we can
	 * just say >>6. */
	return (u >> 6);
}

static inline int
eq_debug_entry(const struct event_debug_entry *a,
    const struct event_debug_entry *b)
{
	return a->ptr == b->ptr;
}

int _event_debug_mode_on = 0;
/* Set if it's too late to enable event_debug_mode. */
static int event_debug_mode_too_late = 0;
#ifndef _EVENT_DISABLE_THREAD_SUPPORT
static void *_event_debug_map_lock = NULL;
#endif
static HT_HEAD(event_debug_map, event_debug_entry) global_debug_map =
	HT_INITIALIZER();

HT_PROTOTYPE(event_debug_map, event_debug_entry, node, hash_debug_entry,
    eq_debug_entry)
HT_GENERATE(event_debug_map, event_debug_entry, node, hash_debug_entry,
    eq_debug_entry, 0.5, mm_malloc, mm_realloc, mm_free)
199 /* Macro: record that ev is now setup (that is, ready for an add) */
200 #define _event_debug_note_setup(ev) do { \
201 if (_event_debug_mode_on) { \
202 struct event_debug_entry *dent,find; \
204 EVLOCK_LOCK(_event_debug_map_lock, 0); \
205 dent = HT_FIND(event_debug_map, &global_debug_map, &find); \
209 dent = mm_malloc(sizeof(*dent)); \
212 "Out of memory in debugging code"); \
215 HT_INSERT(event_debug_map, &global_debug_map, dent); \
217 EVLOCK_UNLOCK(_event_debug_map_lock, 0); \
219 event_debug_mode_too_late = 1; \
220 } while (/*CONSTCOND*/0)
221 /* Macro: record that ev is no longer setup */
222 #define _event_debug_note_teardown(ev) do { \
223 if (_event_debug_mode_on) { \
224 struct event_debug_entry *dent,find; \
226 EVLOCK_LOCK(_event_debug_map_lock, 0); \
227 dent = HT_REMOVE(event_debug_map, &global_debug_map, &find); \
230 EVLOCK_UNLOCK(_event_debug_map_lock, 0); \
232 event_debug_mode_too_late = 1; \
233 } while (/*CONSTCOND*/0)
234 /* Macro: record that ev is now added */
235 #define _event_debug_note_add(ev) do { \
236 if (_event_debug_mode_on) { \
237 struct event_debug_entry *dent,find; \
239 EVLOCK_LOCK(_event_debug_map_lock, 0); \
240 dent = HT_FIND(event_debug_map, &global_debug_map, &find); \
244 event_errx(_EVENT_ERR_ABORT, \
245 "%s: noting an add on a non-setup event %p" \
246 " (events: 0x%x, fd: "EV_SOCK_FMT \
248 __func__, (ev), (ev)->ev_events, \
249 EV_SOCK_ARG((ev)->ev_fd), (ev)->ev_flags); \
251 EVLOCK_UNLOCK(_event_debug_map_lock, 0); \
253 event_debug_mode_too_late = 1; \
254 } while (/*CONSTCOND*/0)
255 /* Macro: record that ev is no longer added */
256 #define _event_debug_note_del(ev) do { \
257 if (_event_debug_mode_on) { \
258 struct event_debug_entry *dent,find; \
260 EVLOCK_LOCK(_event_debug_map_lock, 0); \
261 dent = HT_FIND(event_debug_map, &global_debug_map, &find); \
265 event_errx(_EVENT_ERR_ABORT, \
266 "%s: noting a del on a non-setup event %p" \
267 " (events: 0x%x, fd: "EV_SOCK_FMT \
269 __func__, (ev), (ev)->ev_events, \
270 EV_SOCK_ARG((ev)->ev_fd), (ev)->ev_flags); \
272 EVLOCK_UNLOCK(_event_debug_map_lock, 0); \
274 event_debug_mode_too_late = 1; \
275 } while (/*CONSTCOND*/0)
276 /* Macro: assert that ev is setup (i.e., okay to add or inspect) */
277 #define _event_debug_assert_is_setup(ev) do { \
278 if (_event_debug_mode_on) { \
279 struct event_debug_entry *dent,find; \
281 EVLOCK_LOCK(_event_debug_map_lock, 0); \
282 dent = HT_FIND(event_debug_map, &global_debug_map, &find); \
284 event_errx(_EVENT_ERR_ABORT, \
285 "%s called on a non-initialized event %p" \
286 " (events: 0x%x, fd: "EV_SOCK_FMT\
288 __func__, (ev), (ev)->ev_events, \
289 EV_SOCK_ARG((ev)->ev_fd), (ev)->ev_flags); \
291 EVLOCK_UNLOCK(_event_debug_map_lock, 0); \
293 } while (/*CONSTCOND*/0)
294 /* Macro: assert that ev is not added (i.e., okay to tear down or set
296 #define _event_debug_assert_not_added(ev) do { \
297 if (_event_debug_mode_on) { \
298 struct event_debug_entry *dent,find; \
300 EVLOCK_LOCK(_event_debug_map_lock, 0); \
301 dent = HT_FIND(event_debug_map, &global_debug_map, &find); \
302 if (dent && dent->added) { \
303 event_errx(_EVENT_ERR_ABORT, \
304 "%s called on an already added event %p" \
305 " (events: 0x%x, fd: "EV_SOCK_FMT", " \
307 __func__, (ev), (ev)->ev_events, \
308 EV_SOCK_ARG((ev)->ev_fd), (ev)->ev_flags); \
310 EVLOCK_UNLOCK(_event_debug_map_lock, 0); \
312 } while (/*CONSTCOND*/0)
314 #define _event_debug_note_setup(ev) \
316 #define _event_debug_note_teardown(ev) \
318 #define _event_debug_note_add(ev) \
320 #define _event_debug_note_del(ev) \
322 #define _event_debug_assert_is_setup(ev) \
324 #define _event_debug_assert_not_added(ev) \
#define EVENT_BASE_ASSERT_LOCKED(base)		\
	EVLOCK_ASSERT_LOCKED((base)->th_base_lock)

/* The first time this function is called, it sets use_monotonic to 1
 * if we have a clock function that supports monotonic time */
static void
detect_monotonic(void)
{
#if defined(_EVENT_HAVE_CLOCK_GETTIME) && defined(CLOCK_MONOTONIC)
	struct timespec	ts;
	static int use_monotonic_initialized = 0;

	if (use_monotonic_initialized)
		return;

	if (clock_gettime(CLOCK_MONOTONIC, &ts) == 0)
		use_monotonic = 1;

	use_monotonic_initialized = 1;
#endif
}

/* How often (in seconds) do we check for changes in wall clock time relative
 * to monotonic time?  Set this to -1 for 'never.' */
#define CLOCK_SYNC_INTERVAL -1

/** Set 'tp' to the current time according to 'base'.  We must hold the lock
 * on 'base'.  If there is a cached time, return it.  Otherwise, use
 * clock_gettime or gettimeofday as appropriate to find out the right time.
 * Return 0 on success, -1 on failure.
 */
static int
gettime(struct event_base *base, struct timeval *tp)
{
	EVENT_BASE_ASSERT_LOCKED(base);

	if (base->tv_cache.tv_sec) {
		*tp = base->tv_cache;
		return (0);
	}

#if defined(_EVENT_HAVE_CLOCK_GETTIME) && defined(CLOCK_MONOTONIC)
	if (use_monotonic) {
		struct timespec	ts;

		if (clock_gettime(CLOCK_MONOTONIC, &ts) == -1)
			return (-1);

		tp->tv_sec = ts.tv_sec;
		tp->tv_usec = ts.tv_nsec / 1000;
		if (base->last_updated_clock_diff + CLOCK_SYNC_INTERVAL
		    < ts.tv_sec) {
			struct timeval tv;
			evutil_gettimeofday(&tv, NULL);
			evutil_timersub(&tv, tp, &base->tv_clock_diff);
			base->last_updated_clock_diff = ts.tv_sec;
		}

		return (0);
	}
#endif

	return (evutil_gettimeofday(tp, NULL));
}
int
event_base_gettimeofday_cached(struct event_base *base, struct timeval *tv)
{
	int r;
	if (!base) {
		base = current_base;
		if (!current_base)
			return evutil_gettimeofday(tv, NULL);
	}

	EVBASE_ACQUIRE_LOCK(base, th_base_lock);
	if (base->tv_cache.tv_sec == 0) {
		r = evutil_gettimeofday(tv, NULL);
	} else {
#if defined(_EVENT_HAVE_CLOCK_GETTIME) && defined(CLOCK_MONOTONIC)
		evutil_timeradd(&base->tv_cache, &base->tv_clock_diff, tv);
#else
		*tv = base->tv_cache;
#endif
		r = 0;
	}
	EVBASE_RELEASE_LOCK(base, th_base_lock);
	return r;
}
/** Make 'base' have no current cached time. */
static inline void
clear_time_cache(struct event_base *base)
{
	base->tv_cache.tv_sec = 0;
}

/** Replace the cached time in 'base' with the current time. */
static inline void
update_time_cache(struct event_base *base)
{
	base->tv_cache.tv_sec = 0;
	if (!(base->flags & EVENT_BASE_FLAG_NO_CACHE_TIME))
	    gettime(base, &base->tv_cache);
}
struct event_base *
event_init(void)
{
	struct event_base *base = event_base_new_with_config(NULL);

	if (base == NULL) {
		event_errx(1, "%s: Unable to construct event_base", __func__);
		return NULL;
	}

	current_base = base;

	return (base);
}

struct event_base *
event_base_new(void)
{
	struct event_base *base = NULL;
	struct event_config *cfg = event_config_new();
	if (cfg) {
		base = event_base_new_with_config(cfg);
		event_config_free(cfg);
	}
	return base;
}
/** Return true iff 'method' is the name of a method that 'cfg' tells us to
 * avoid. */
464 event_config_is_avoided_method(const struct event_config
*cfg
,
467 struct event_config_entry
*entry
;
469 TAILQ_FOREACH(entry
, &cfg
->entries
, next
) {
470 if (entry
->avoid_method
!= NULL
&&
471 strcmp(entry
->avoid_method
, method
) == 0)
478 /** Return true iff 'method' is disabled according to the environment. */
480 event_is_method_disabled(const char *name
)
482 char environment
[64];
485 evutil_snprintf(environment
, sizeof(environment
), "EVENT_NO%s", name
);
486 for (i
= 8; environment
[i
] != '\0'; ++i
)
487 environment
[i
] = EVUTIL_TOUPPER(environment
[i
]);
488 /* Note that evutil_getenv() ignores the environment entirely if
490 return (evutil_getenv(environment
) != NULL
);
494 event_base_get_features(const struct event_base
*base
)
496 return base
->evsel
->features
;
500 event_deferred_cb_queue_init(struct deferred_cb_queue
*cb
)
502 memset(cb
, 0, sizeof(struct deferred_cb_queue
));
503 TAILQ_INIT(&cb
->deferred_cb_list
);
506 /** Helper for the deferred_cb queue: wake up the event base. */
508 notify_base_cbq_callback(struct deferred_cb_queue
*cb
, void *baseptr
)
510 struct event_base
*base
= baseptr
;
511 if (EVBASE_NEED_NOTIFY(base
))
512 evthread_notify_base(base
);
515 struct deferred_cb_queue
*
516 event_base_get_deferred_cb_queue(struct event_base
*base
)
518 return base
? &base
->defer_queue
: NULL
;
522 event_enable_debug_mode(void)
524 #ifndef _EVENT_DISABLE_DEBUG_MODE
525 if (_event_debug_mode_on
)
526 event_errx(1, "%s was called twice!", __func__
);
527 if (event_debug_mode_too_late
)
528 event_errx(1, "%s must be called *before* creating any events "
529 "or event_bases",__func__
);
531 _event_debug_mode_on
= 1;
533 HT_INIT(event_debug_map
, &global_debug_map
);
539 event_disable_debug_mode(void)
541 struct event_debug_entry
**ent
, *victim
;
543 EVLOCK_LOCK(_event_debug_map_lock
, 0);
544 for (ent
= HT_START(event_debug_map
, &global_debug_map
); ent
; ) {
546 ent
= HT_NEXT_RMV(event_debug_map
,&global_debug_map
, ent
);
549 HT_CLEAR(event_debug_map
, &global_debug_map
);
550 EVLOCK_UNLOCK(_event_debug_map_lock
, 0);
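/*
 * Usage sketch (illustrative, not part of this file): debug mode must be
 * switched on before any event or event_base has been created, e.g. as the
 * first thing in main():
 *
 *	event_enable_debug_mode();
 *	struct event_base *base = event_base_new();
 */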
555 event_base_new_with_config(const struct event_config
*cfg
)
558 struct event_base
*base
;
559 int should_check_environment
;
561 #ifndef _EVENT_DISABLE_DEBUG_MODE
562 event_debug_mode_too_late
= 1;
565 if ((base
= mm_calloc(1, sizeof(struct event_base
))) == NULL
) {
566 event_warn("%s: calloc", __func__
);
570 gettime(base
, &base
->event_tv
);
572 min_heap_ctor(&base
->timeheap
);
573 TAILQ_INIT(&base
->eventqueue
);
574 base
->sig
.ev_signal_pair
[0] = -1;
575 base
->sig
.ev_signal_pair
[1] = -1;
576 base
->th_notify_fd
[0] = -1;
577 base
->th_notify_fd
[1] = -1;
579 event_deferred_cb_queue_init(&base
->defer_queue
);
580 base
->defer_queue
.notify_fn
= notify_base_cbq_callback
;
581 base
->defer_queue
.notify_arg
= base
;
583 base
->flags
= cfg
->flags
;
585 evmap_io_initmap(&base
->io
);
586 evmap_signal_initmap(&base
->sigmap
);
587 event_changelist_init(&base
->changelist
);
591 should_check_environment
=
592 !(cfg
&& (cfg
->flags
& EVENT_BASE_FLAG_IGNORE_ENV
));
594 for (i
= 0; eventops
[i
] && !base
->evbase
; i
++) {
596 /* determine if this backend should be avoided */
597 if (event_config_is_avoided_method(cfg
,
600 if ((eventops
[i
]->features
& cfg
->require_features
)
601 != cfg
->require_features
)
605 /* also obey the environment variables */
606 if (should_check_environment
&&
607 event_is_method_disabled(eventops
[i
]->name
))
610 base
->evsel
= eventops
[i
];
612 base
->evbase
= base
->evsel
->init(base
);
615 if (base
->evbase
== NULL
) {
616 event_warnx("%s: no event mechanism available",
619 event_base_free(base
);
623 if (evutil_getenv("EVENT_SHOW_METHOD"))
624 event_msgx("libevent using: %s", base
->evsel
->name
);
626 /* allocate a single active event queue */
627 if (event_base_priority_init(base
, 1) < 0) {
628 event_base_free(base
);
632 /* prepare for threading */
634 #ifndef _EVENT_DISABLE_THREAD_SUPPORT
635 if (EVTHREAD_LOCKING_ENABLED() &&
636 (!cfg
|| !(cfg
->flags
& EVENT_BASE_FLAG_NOLOCK
))) {
638 EVTHREAD_ALLOC_LOCK(base
->th_base_lock
,
639 EVTHREAD_LOCKTYPE_RECURSIVE
);
640 base
->defer_queue
.lock
= base
->th_base_lock
;
641 EVTHREAD_ALLOC_COND(base
->current_event_cond
);
642 r
= evthread_make_base_notifiable(base
);
644 event_warnx("%s: Unable to make base notifiable.", __func__
);
645 event_base_free(base
);
652 if (cfg
&& (cfg
->flags
& EVENT_BASE_FLAG_STARTUP_IOCP
))
653 event_base_start_iocp(base
, cfg
->n_cpus_hint
);
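/*
 * Usage sketch (illustrative, not part of this file): steering the backend
 * selection loop above with an event_config.  The EVENT_NO<BACKEND>
 * environment variables it consults can be bypassed with
 * EVENT_BASE_FLAG_IGNORE_ENV.
 *
 *	struct event_config *cfg = event_config_new();
 *	event_config_avoid_method(cfg, "select");
 *	event_config_require_features(cfg, EV_FEATURE_O1);
 *	struct event_base *base = event_base_new_with_config(cfg);
 *	event_config_free(cfg);
 */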
660 event_base_start_iocp(struct event_base
*base
, int n_cpus
)
665 base
->iocp
= event_iocp_port_launch(n_cpus
);
667 event_warnx("%s: Couldn't launch IOCP", __func__
);
677 event_base_stop_iocp(struct event_base
*base
)
684 rv
= event_iocp_shutdown(base
->iocp
, -1);
685 EVUTIL_ASSERT(rv
>= 0);
691 event_base_free(struct event_base
*base
)
695 /* XXXX grab the lock? If there is contention when one thread frees
696 * the base, then the contending thread will be very sad soon. */
698 /* event_base_free(NULL) is how to free the current_base if we
699 * made it with event_init and forgot to hold a reference to it. */
700 if (base
== NULL
&& current_base
)
702 /* If we're freeing current_base, there won't be a current_base. */
703 if (base
== current_base
)
705 /* Don't actually free NULL. */
707 event_warnx("%s: no base to free", __func__
);
710 /* XXX(niels) - check for internal events first */
713 event_base_stop_iocp(base
);
716 /* threading fds if we have them */
717 if (base
->th_notify_fd
[0] != -1) {
718 event_del(&base
->th_notify
);
719 EVUTIL_CLOSESOCKET(base
->th_notify_fd
[0]);
720 if (base
->th_notify_fd
[1] != -1)
721 EVUTIL_CLOSESOCKET(base
->th_notify_fd
[1]);
722 base
->th_notify_fd
[0] = -1;
723 base
->th_notify_fd
[1] = -1;
724 event_debug_unassign(&base
->th_notify
);
727 /* Delete all non-internal events. */
728 for (ev
= TAILQ_FIRST(&base
->eventqueue
); ev
; ) {
729 struct event
*next
= TAILQ_NEXT(ev
, ev_next
);
730 if (!(ev
->ev_flags
& EVLIST_INTERNAL
)) {
736 while ((ev
= min_heap_top(&base
->timeheap
)) != NULL
) {
740 for (i
= 0; i
< base
->n_common_timeouts
; ++i
) {
741 struct common_timeout_list
*ctl
=
742 base
->common_timeout_queues
[i
];
743 event_del(&ctl
->timeout_event
); /* Internal; doesn't count */
744 event_debug_unassign(&ctl
->timeout_event
);
745 for (ev
= TAILQ_FIRST(&ctl
->events
); ev
; ) {
746 struct event
*next
= TAILQ_NEXT(ev
,
747 ev_timeout_pos
.ev_next_with_common_timeout
);
748 if (!(ev
->ev_flags
& EVLIST_INTERNAL
)) {
756 if (base
->common_timeout_queues
)
757 mm_free(base
->common_timeout_queues
);
759 for (i
= 0; i
< base
->nactivequeues
; ++i
) {
760 for (ev
= TAILQ_FIRST(&base
->activequeues
[i
]); ev
; ) {
761 struct event
*next
= TAILQ_NEXT(ev
, ev_active_next
);
762 if (!(ev
->ev_flags
& EVLIST_INTERNAL
)) {
771 event_debug(("%s: %d events were still set in base",
772 __func__
, n_deleted
));
774 if (base
->evsel
!= NULL
&& base
->evsel
->dealloc
!= NULL
)
775 base
->evsel
->dealloc(base
);
777 for (i
= 0; i
< base
->nactivequeues
; ++i
)
778 EVUTIL_ASSERT(TAILQ_EMPTY(&base
->activequeues
[i
]));
780 EVUTIL_ASSERT(min_heap_empty(&base
->timeheap
));
781 min_heap_dtor(&base
->timeheap
);
783 mm_free(base
->activequeues
);
785 EVUTIL_ASSERT(TAILQ_EMPTY(&base
->eventqueue
));
787 evmap_io_clear(&base
->io
);
788 evmap_signal_clear(&base
->sigmap
);
789 event_changelist_freemem(&base
->changelist
);
791 EVTHREAD_FREE_LOCK(base
->th_base_lock
, EVTHREAD_LOCKTYPE_RECURSIVE
);
792 EVTHREAD_FREE_COND(base
->current_event_cond
);
797 /* reinitialize the event base after a fork */
799 event_reinit(struct event_base
*base
)
801 const struct eventop
*evsel
;
804 int was_notifiable
= 0;
806 EVBASE_ACQUIRE_LOCK(base
, th_base_lock
);
811 /* Right now, reinit always takes effect, since even if the
812 backend doesn't require it, the signal socketpair code does.
816 /* check if this event mechanism requires reinit */
817 if (!evsel
->need_reinit
)
821 /* prevent internal delete */
822 if (base
->sig
.ev_signal_added
) {
823 /* we cannot call event_del here because the base has
824 * not been reinitialized yet. */
825 event_queue_remove(base
, &base
->sig
.ev_signal
,
827 if (base
->sig
.ev_signal
.ev_flags
& EVLIST_ACTIVE
)
828 event_queue_remove(base
, &base
->sig
.ev_signal
,
830 if (base
->sig
.ev_signal_pair
[0] != -1)
831 EVUTIL_CLOSESOCKET(base
->sig
.ev_signal_pair
[0]);
832 if (base
->sig
.ev_signal_pair
[1] != -1)
833 EVUTIL_CLOSESOCKET(base
->sig
.ev_signal_pair
[1]);
834 base
->sig
.ev_signal_added
= 0;
836 if (base
->th_notify_fd
[0] != -1) {
837 /* we cannot call event_del here because the base has
838 * not been reinitialized yet. */
840 event_queue_remove(base
, &base
->th_notify
,
842 if (base
->th_notify
.ev_flags
& EVLIST_ACTIVE
)
843 event_queue_remove(base
, &base
->th_notify
,
845 base
->sig
.ev_signal_added
= 0;
846 EVUTIL_CLOSESOCKET(base
->th_notify_fd
[0]);
847 if (base
->th_notify_fd
[1] != -1)
848 EVUTIL_CLOSESOCKET(base
->th_notify_fd
[1]);
849 base
->th_notify_fd
[0] = -1;
850 base
->th_notify_fd
[1] = -1;
851 event_debug_unassign(&base
->th_notify
);
854 if (base
->evsel
->dealloc
!= NULL
)
855 base
->evsel
->dealloc(base
);
856 base
->evbase
= evsel
->init(base
);
857 if (base
->evbase
== NULL
) {
858 event_errx(1, "%s: could not reinitialize event mechanism",
864 event_changelist_freemem(&base
->changelist
); /* XXX */
865 evmap_io_clear(&base
->io
);
866 evmap_signal_clear(&base
->sigmap
);
868 TAILQ_FOREACH(ev
, &base
->eventqueue
, ev_next
) {
869 if (ev
->ev_events
& (EV_READ
|EV_WRITE
)) {
870 if (ev
== &base
->sig
.ev_signal
) {
871 /* If we run into the ev_signal event, it's only
872 * in eventqueue because some signal event was
873 * added, which made evsig_add re-add ev_signal.
874 * So don't double-add it. */
877 if (evmap_io_add(base
, ev
->ev_fd
, ev
) == -1)
879 } else if (ev
->ev_events
& EV_SIGNAL
) {
880 if (evmap_signal_add(base
, (int)ev
->ev_fd
, ev
) == -1)
885 if (was_notifiable
&& res
== 0)
886 res
= evthread_make_base_notifiable(base
);
889 EVBASE_RELEASE_LOCK(base
, th_base_lock
);
894 event_get_supported_methods(void)
896 static const char **methods
= NULL
;
897 const struct eventop
**method
;
901 /* count all methods */
902 for (method
= &eventops
[0]; *method
!= NULL
; ++method
) {
906 /* allocate one more than we need for the NULL pointer */
907 tmp
= mm_calloc((i
+ 1), sizeof(char *));
911 /* populate the array with the supported methods */
912 for (k
= 0, i
= 0; eventops
[k
] != NULL
; ++k
) {
913 tmp
[i
++] = eventops
[k
]->name
;
918 mm_free(__UNCONST(methods
));
925 struct event_config
*
926 event_config_new(void)
928 struct event_config
*cfg
= mm_calloc(1, sizeof(*cfg
));
933 TAILQ_INIT(&cfg
->entries
);
939 event_config_entry_free(struct event_config_entry
*entry
)
941 if (entry
->avoid_method
!= NULL
)
942 mm_free(__UNCONST(entry
->avoid_method
));
947 event_config_free(struct event_config
*cfg
)
949 struct event_config_entry
*entry
;
951 while ((entry
= TAILQ_FIRST(&cfg
->entries
)) != NULL
) {
952 TAILQ_REMOVE(&cfg
->entries
, entry
, next
);
953 event_config_entry_free(entry
);
959 event_config_set_flag(struct event_config
*cfg
, int flag
)
968 event_config_avoid_method(struct event_config
*cfg
, const char *method
)
970 struct event_config_entry
*entry
= mm_malloc(sizeof(*entry
));
974 if ((entry
->avoid_method
= mm_strdup(method
)) == NULL
) {
979 TAILQ_INSERT_TAIL(&cfg
->entries
, entry
, next
);
985 event_config_require_features(struct event_config
*cfg
,
990 cfg
->require_features
= features
;
995 event_config_set_num_cpus_hint(struct event_config
*cfg
, int cpus
)
999 cfg
->n_cpus_hint
= cpus
;
1004 event_priority_init(int npriorities
)
1006 return event_base_priority_init(current_base
, npriorities
);
1010 event_base_priority_init(struct event_base
*base
, int npriorities
)
1014 if (N_ACTIVE_CALLBACKS(base
) || npriorities
< 1
1015 || npriorities
>= EVENT_MAX_PRIORITIES
)
1018 if (npriorities
== base
->nactivequeues
)
1021 if (base
->nactivequeues
) {
1022 mm_free(base
->activequeues
);
1023 base
->nactivequeues
= 0;
1026 /* Allocate our priority queues */
1027 base
->activequeues
= (struct event_list
*)
1028 mm_calloc(npriorities
, sizeof(struct event_list
));
1029 if (base
->activequeues
== NULL
) {
1030 event_warn("%s: calloc", __func__
);
1033 base
->nactivequeues
= npriorities
;
1035 for (i
= 0; i
< base
->nactivequeues
; ++i
) {
1036 TAILQ_INIT(&base
->activequeues
[i
]);
1042 /* Returns true iff we're currently watching any events. */
1044 event_haveevents(struct event_base
*base
)
1046 /* Caller must hold th_base_lock */
1047 return (base
->virtual_event_count
> 0 || base
->event_count
> 0);
1050 /* "closure" function called when processing active signal events */
1052 event_signal_closure(struct event_base
*base
, struct event
*ev
)
1057 /* Allows deletes to work */
1058 ncalls
= ev
->ev_ncalls
;
1060 ev
->ev_pncalls
= &ncalls
;
1061 EVBASE_RELEASE_LOCK(base
, th_base_lock
);
1064 ev
->ev_ncalls
= ncalls
;
1066 ev
->ev_pncalls
= NULL
;
1067 (*ev
->ev_callback
)(ev
->ev_fd
, ev
->ev_res
, ev
->ev_arg
);
1069 EVBASE_ACQUIRE_LOCK(base
, th_base_lock
);
1070 should_break
= base
->event_break
;
1071 EVBASE_RELEASE_LOCK(base
, th_base_lock
);
1075 ev
->ev_pncalls
= NULL
;
1081 /* Common timeouts are special timeouts that are handled as queues rather than
1082 * in the minheap. This is more efficient than the minheap if we happen to
1083 * know that we're going to get several thousands of timeout events all with
1084 * the same timeout value.
1086 * Since all our timeout handling code assumes timevals can be copied,
1087 * assigned, etc, we can't use "magic pointer" to encode these common
1088 * timeouts. Searching through a list to see if every timeout is common could
1089 * also get inefficient. Instead, we take advantage of the fact that tv_usec
1090 * is 32 bits long, but only uses 20 of those bits (since it can never be over
 * 999999.)  We use the top bits to encode 4 bits of magic number, and 8 bits
 * of index into the event_base's array of common timeouts.
1095 #define MICROSECONDS_MASK COMMON_TIMEOUT_MICROSECONDS_MASK
1096 #define COMMON_TIMEOUT_IDX_MASK 0x0ff00000
1097 #define COMMON_TIMEOUT_IDX_SHIFT 20
1098 #define COMMON_TIMEOUT_MASK 0xf0000000
1099 #define COMMON_TIMEOUT_MAGIC 0x50000000
1101 #define COMMON_TIMEOUT_IDX(tv) \
1102 (((tv)->tv_usec & COMMON_TIMEOUT_IDX_MASK)>>COMMON_TIMEOUT_IDX_SHIFT)
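/*
 * Illustration (not part of this file): a common timeout of 2.5 seconds
 * stored at index 3 is encoded as
 *
 *	tv.tv_sec  == 2
 *	tv.tv_usec == 500000 | COMMON_TIMEOUT_MAGIC
 *			     | (3 << COMMON_TIMEOUT_IDX_SHIFT)
 *
 * so COMMON_TIMEOUT_IDX(&tv) yields 3 and
 * (tv.tv_usec & MICROSECONDS_MASK) recovers 500000.
 */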
/** Return true iff 'tv' is a common timeout in 'base' */
1106 is_common_timeout(const struct timeval
*tv
,
1107 const struct event_base
*base
)
1110 if ((tv
->tv_usec
& COMMON_TIMEOUT_MASK
) != COMMON_TIMEOUT_MAGIC
)
1112 idx
= COMMON_TIMEOUT_IDX(tv
);
1113 return idx
< base
->n_common_timeouts
;
1116 /* True iff tv1 and tv2 have the same common-timeout index, or if neither
1117 * one is a common timeout. */
1119 is_same_common_timeout(const struct timeval
*tv1
, const struct timeval
*tv2
)
1121 return (tv1
->tv_usec
& ~MICROSECONDS_MASK
) ==
1122 (tv2
->tv_usec
& ~MICROSECONDS_MASK
);
1125 /** Requires that 'tv' is a common timeout. Return the corresponding
1126 * common_timeout_list. */
1127 static inline struct common_timeout_list
*
1128 get_common_timeout_list(struct event_base
*base
, const struct timeval
*tv
)
1130 return base
->common_timeout_queues
[COMMON_TIMEOUT_IDX(tv
)];
1135 common_timeout_ok(const struct timeval
*tv
,
1136 struct event_base
*base
)
1138 const struct timeval
*expect
=
1139 &get_common_timeout_list(base
, tv
)->duration
;
1140 return tv
->tv_sec
== expect
->tv_sec
&&
1141 tv
->tv_usec
== expect
->tv_usec
;
1145 /* Add the timeout for the first event in given common timeout list to the
1146 * event_base's minheap. */
1148 common_timeout_schedule(struct common_timeout_list
*ctl
,
1149 const struct timeval
*now
, struct event
*head
)
1151 struct timeval timeout
= head
->ev_timeout
;
1152 timeout
.tv_usec
&= MICROSECONDS_MASK
;
1153 event_add_internal(&ctl
->timeout_event
, &timeout
, 1);
1156 /* Callback: invoked when the timeout for a common timeout queue triggers.
1157 * This means that (at least) the first event in that queue should be run,
1158 * and the timeout should be rescheduled if there are more events. */
1160 common_timeout_callback(evutil_socket_t fd
, short what
, void *arg
)
1163 struct common_timeout_list
*ctl
= arg
;
1164 struct event_base
*base
= ctl
->base
;
1165 struct event
*ev
= NULL
;
1166 EVBASE_ACQUIRE_LOCK(base
, th_base_lock
);
1167 gettime(base
, &now
);
1169 ev
= TAILQ_FIRST(&ctl
->events
);
1170 if (!ev
|| ev
->ev_timeout
.tv_sec
> now
.tv_sec
||
1171 (ev
->ev_timeout
.tv_sec
== now
.tv_sec
&&
1172 (ev
->ev_timeout
.tv_usec
&MICROSECONDS_MASK
) > now
.tv_usec
))
1174 event_del_internal(ev
);
1175 event_active_nolock(ev
, EV_TIMEOUT
, 1);
1178 common_timeout_schedule(ctl
, &now
, ev
);
1179 EVBASE_RELEASE_LOCK(base
, th_base_lock
);
1182 #define MAX_COMMON_TIMEOUTS 256
1184 const struct timeval
*
1185 event_base_init_common_timeout(struct event_base
*base
,
1186 const struct timeval
*duration
)
1190 const struct timeval
*result
=NULL
;
1191 struct common_timeout_list
*new_ctl
;
1193 EVBASE_ACQUIRE_LOCK(base
, th_base_lock
);
1194 if (duration
->tv_usec
> 1000000) {
1195 memcpy(&tv
, duration
, sizeof(struct timeval
));
1196 if (is_common_timeout(duration
, base
))
1197 tv
.tv_usec
&= MICROSECONDS_MASK
;
1198 tv
.tv_sec
+= tv
.tv_usec
/ 1000000;
1199 tv
.tv_usec
%= 1000000;
1202 for (i
= 0; i
< base
->n_common_timeouts
; ++i
) {
1203 const struct common_timeout_list
*ctl
=
1204 base
->common_timeout_queues
[i
];
1205 if (duration
->tv_sec
== ctl
->duration
.tv_sec
&&
1206 duration
->tv_usec
==
1207 (ctl
->duration
.tv_usec
& MICROSECONDS_MASK
)) {
1208 EVUTIL_ASSERT(is_common_timeout(&ctl
->duration
, base
));
1209 result
= &ctl
->duration
;
1213 if (base
->n_common_timeouts
== MAX_COMMON_TIMEOUTS
) {
1214 event_warnx("%s: Too many common timeouts already in use; "
1215 "we only support %d per event_base", __func__
,
1216 MAX_COMMON_TIMEOUTS
);
1219 if (base
->n_common_timeouts_allocated
== base
->n_common_timeouts
) {
1220 int n
= base
->n_common_timeouts
< 16 ? 16 :
1221 base
->n_common_timeouts
*2;
1222 struct common_timeout_list
**newqueues
=
1223 mm_realloc(base
->common_timeout_queues
,
1224 n
*sizeof(struct common_timeout_queue
*));
1226 event_warn("%s: realloc",__func__
);
1229 base
->n_common_timeouts_allocated
= n
;
1230 base
->common_timeout_queues
= newqueues
;
1232 new_ctl
= mm_calloc(1, sizeof(struct common_timeout_list
));
1234 event_warn("%s: calloc",__func__
);
1237 TAILQ_INIT(&new_ctl
->events
);
1238 new_ctl
->duration
.tv_sec
= duration
->tv_sec
;
1239 new_ctl
->duration
.tv_usec
=
1240 duration
->tv_usec
| COMMON_TIMEOUT_MAGIC
|
1241 (base
->n_common_timeouts
<< COMMON_TIMEOUT_IDX_SHIFT
);
1242 evtimer_assign(&new_ctl
->timeout_event
, base
,
1243 common_timeout_callback
, new_ctl
);
1244 new_ctl
->timeout_event
.ev_flags
|= EVLIST_INTERNAL
;
1245 event_priority_set(&new_ctl
->timeout_event
, 0);
1246 new_ctl
->base
= base
;
1247 base
->common_timeout_queues
[base
->n_common_timeouts
++] = new_ctl
;
1248 result
= &new_ctl
->duration
;
1252 EVUTIL_ASSERT(is_common_timeout(result
, base
));
1254 EVBASE_RELEASE_LOCK(base
, th_base_lock
);
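/*
 * Usage sketch (caller side, not part of this file): to add many events that
 * all share the same 10-second timeout without loading the minheap:
 *
 *	struct timeval tv = { 10, 0 };
 *	const struct timeval *common =
 *	    event_base_init_common_timeout(base, &tv);
 *	event_add(ev, common);
 */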
1258 /* Closure function invoked when we're activating a persistent event. */
1260 event_persist_closure(struct event_base
*base
, struct event
*ev
)
	// Declare our callback; we use this to store our callback before it's executed
1263 void (*evcb_callback
)(evutil_socket_t
, short, void *);
1265 // Other fields of *ev that must be stored before executing
1266 evutil_socket_t evcb_fd
;
1270 /* reschedule the persistent event if we have a timeout. */
1271 if (ev
->ev_io_timeout
.tv_sec
|| ev
->ev_io_timeout
.tv_usec
) {
1272 /* If there was a timeout, we want it to run at an interval of
1273 * ev_io_timeout after the last time it was _scheduled_ for,
1274 * not ev_io_timeout after _now_. If it fired for another
1275 * reason, though, the timeout ought to start ticking _now_. */
1276 struct timeval run_at
, relative_to
, delay
, now
;
1277 ev_uint32_t usec_mask
= 0;
1278 EVUTIL_ASSERT(is_same_common_timeout(&ev
->ev_timeout
,
1279 &ev
->ev_io_timeout
));
1280 gettime(base
, &now
);
1281 if (is_common_timeout(&ev
->ev_timeout
, base
)) {
1282 delay
= ev
->ev_io_timeout
;
1283 usec_mask
= delay
.tv_usec
& ~MICROSECONDS_MASK
;
1284 delay
.tv_usec
&= MICROSECONDS_MASK
;
1285 if (ev
->ev_res
& EV_TIMEOUT
) {
1286 relative_to
= ev
->ev_timeout
;
1287 relative_to
.tv_usec
&= MICROSECONDS_MASK
;
1292 delay
= ev
->ev_io_timeout
;
1293 if (ev
->ev_res
& EV_TIMEOUT
) {
1294 relative_to
= ev
->ev_timeout
;
1299 evutil_timeradd(&relative_to
, &delay
, &run_at
);
1300 if (evutil_timercmp(&run_at
, &now
, <)) {
1301 /* Looks like we missed at least one invocation due to
1302 * a clock jump, not running the event loop for a
1303 * while, really slow callbacks, or
1304 * something. Reschedule relative to now.
1306 evutil_timeradd(&now
, &delay
, &run_at
);
1308 run_at
.tv_usec
|= usec_mask
;
1309 event_add_internal(ev
, &run_at
, 1);
1312 // Save our callback before we release the lock
1313 evcb_callback
= ev
->ev_callback
;
1314 evcb_fd
= ev
->ev_fd
;
1315 evcb_res
= ev
->ev_res
;
1316 evcb_arg
= ev
->ev_arg
;
1319 EVBASE_RELEASE_LOCK(base
, th_base_lock
);
1321 // Execute the callback
1322 (evcb_callback
)(evcb_fd
, evcb_res
, evcb_arg
);
1326 Helper for event_process_active to process all the events in a single queue,
1327 releasing the lock as we go. This function requires that the lock be held
1328 when it's invoked. Returns -1 if we get a signal or an event_break that
1329 means we should stop processing any active events now. Otherwise returns
1330 the number of non-internal events that we processed.
1333 event_process_active_single_queue(struct event_base
*base
,
1334 struct event_list
*activeq
)
1339 EVUTIL_ASSERT(activeq
!= NULL
);
1341 for (ev
= TAILQ_FIRST(activeq
); ev
; ev
= TAILQ_FIRST(activeq
)) {
1342 if (ev
->ev_events
& EV_PERSIST
)
1343 event_queue_remove(base
, ev
, EVLIST_ACTIVE
);
1345 event_del_internal(ev
);
1346 if (!(ev
->ev_flags
& EVLIST_INTERNAL
))
1350 "event_process_active: event: %p, %s%scall %p",
1352 ev
->ev_res
& EV_READ
? "EV_READ " : " ",
1353 ev
->ev_res
& EV_WRITE
? "EV_WRITE " : " ",
1356 #ifndef _EVENT_DISABLE_THREAD_SUPPORT
1357 base
->current_event
= ev
;
1358 base
->current_event_waiters
= 0;
1361 switch (ev
->ev_closure
) {
1362 case EV_CLOSURE_SIGNAL
:
1363 event_signal_closure(base
, ev
);
1365 case EV_CLOSURE_PERSIST
:
1366 event_persist_closure(base
, ev
);
1369 case EV_CLOSURE_NONE
:
1370 EVBASE_RELEASE_LOCK(base
, th_base_lock
);
1372 ev
->ev_fd
, ev
->ev_res
, ev
->ev_arg
);
1376 EVBASE_ACQUIRE_LOCK(base
, th_base_lock
);
1377 #ifndef _EVENT_DISABLE_THREAD_SUPPORT
1378 base
->current_event
= NULL
;
1379 if (base
->current_event_waiters
) {
1380 base
->current_event_waiters
= 0;
1381 EVTHREAD_COND_BROADCAST(base
->current_event_cond
);
1385 if (base
->event_break
)
1387 if (base
->event_continue
)
   Process up to MAX_DEFERRED of the deferred_cb entries in 'queue'.  If
1395 *breakptr becomes set to 1, stop. Requires that we start out holding
1396 the lock on 'queue'; releases the lock around 'queue' for each deferred_cb
1400 event_process_deferred_callbacks(struct deferred_cb_queue
*queue
, int *breakptr
)
1403 struct deferred_cb
*cb
;
1405 #define MAX_DEFERRED 16
1406 while ((cb
= TAILQ_FIRST(&queue
->deferred_cb_list
))) {
1408 TAILQ_REMOVE(&queue
->deferred_cb_list
, cb
, cb_next
);
1409 --queue
->active_count
;
1410 UNLOCK_DEFERRED_QUEUE(queue
);
1412 cb
->cb(cb
, cb
->arg
);
1414 LOCK_DEFERRED_QUEUE(queue
);
1417 if (++count
== MAX_DEFERRED
)
 * Active events are stored in priority queues.  Lower priorities are always
 * processed before higher priorities.  Low priority events can starve high
1431 event_process_active(struct event_base
*base
)
1433 /* Caller must hold th_base_lock */
1434 struct event_list
*activeq
= NULL
;
1437 for (i
= 0; i
< base
->nactivequeues
; ++i
) {
1438 if (TAILQ_FIRST(&base
->activequeues
[i
]) != NULL
) {
1439 base
->event_running_priority
= i
;
1440 activeq
= &base
->activequeues
[i
];
1441 c
= event_process_active_single_queue(base
, activeq
);
1443 base
->event_running_priority
= -1;
1446 break; /* Processed a real event; do not
1447 * consider lower-priority events */
1448 /* If we get here, all of the events we processed
1449 * were internal. Continue. */
1453 event_process_deferred_callbacks(&base
->defer_queue
,&base
->event_break
);
1454 base
->event_running_priority
= -1;
1459 * Wait continuously for events. We exit only if no events are left.
1463 event_dispatch(void)
1465 return (event_loop(0));
1469 event_base_dispatch(struct event_base
*event_base
)
1471 return (event_base_loop(event_base
, 0));
1475 event_base_get_method(const struct event_base
*base
)
1477 EVUTIL_ASSERT(base
);
1478 return (base
->evsel
->name
);
1481 /** Callback: used to implement event_base_loopexit by telling the event_base
1482 * that it's time to exit its loop. */
1484 event_loopexit_cb(evutil_socket_t fd
, short what
, void *arg
)
1486 struct event_base
*base
= arg
;
1487 base
->event_gotterm
= 1;
1491 event_loopexit(const struct timeval
*tv
)
1493 return (event_once(-1, EV_TIMEOUT
, event_loopexit_cb
,
1498 event_base_loopexit(struct event_base
*event_base
, const struct timeval
*tv
)
1500 return (event_base_once(event_base
, -1, EV_TIMEOUT
, event_loopexit_cb
,
1505 event_loopbreak(void)
1507 return (event_base_loopbreak(current_base
));
1511 event_base_loopbreak(struct event_base
*event_base
)
1514 if (event_base
== NULL
)
1517 EVBASE_ACQUIRE_LOCK(event_base
, th_base_lock
);
1518 event_base
->event_break
= 1;
1520 if (EVBASE_NEED_NOTIFY(event_base
)) {
1521 r
= evthread_notify_base(event_base
);
1525 EVBASE_RELEASE_LOCK(event_base
, th_base_lock
);
1530 event_base_got_break(struct event_base
*event_base
)
1533 EVBASE_ACQUIRE_LOCK(event_base
, th_base_lock
);
1534 res
= event_base
->event_break
;
1535 EVBASE_RELEASE_LOCK(event_base
, th_base_lock
);
1540 event_base_got_exit(struct event_base
*event_base
)
1543 EVBASE_ACQUIRE_LOCK(event_base
, th_base_lock
);
1544 res
= event_base
->event_gotterm
;
1545 EVBASE_RELEASE_LOCK(event_base
, th_base_lock
);
1549 /* not thread safe */
1552 event_loop(int flags
)
1554 return event_base_loop(current_base
, flags
);
1558 event_base_loop(struct event_base
*base
, int flags
)
1560 const struct eventop
*evsel
= base
->evsel
;
1562 struct timeval
*tv_p
;
1563 int res
, done
, retval
= 0;
1565 /* Grab the lock. We will release it inside evsel.dispatch, and again
1566 * as we invoke user callbacks. */
1567 EVBASE_ACQUIRE_LOCK(base
, th_base_lock
);
1569 if (base
->running_loop
) {
1570 event_warnx("%s: reentrant invocation. Only one event_base_loop"
1571 " can run on each event_base at once.", __func__
);
1572 EVBASE_RELEASE_LOCK(base
, th_base_lock
);
1576 base
->running_loop
= 1;
1578 clear_time_cache(base
);
1580 if (base
->sig
.ev_signal_added
&& base
->sig
.ev_n_signals_added
)
1581 evsig_set_base(base
);
1585 #ifndef _EVENT_DISABLE_THREAD_SUPPORT
1586 base
->th_owner_id
= EVTHREAD_GET_ID();
1589 base
->event_gotterm
= base
->event_break
= 0;
1592 base
->event_continue
= 0;
1594 /* Terminate the loop if we have been asked to */
1595 if (base
->event_gotterm
) {
1599 if (base
->event_break
) {
1603 timeout_correct(base
, &tv
);
1606 if (!N_ACTIVE_CALLBACKS(base
) && !(flags
& EVLOOP_NONBLOCK
)) {
1607 timeout_next(base
, &tv_p
);
1610 * if we have active events, we just poll new events
1613 evutil_timerclear(&tv
);
1616 /* If we have no events, we just exit */
1617 if (!event_haveevents(base
) && !N_ACTIVE_CALLBACKS(base
)) {
1618 event_debug(("%s: no events registered.", __func__
));
1623 /* update last old time */
1624 gettime(base
, &base
->event_tv
);
1626 clear_time_cache(base
);
1628 res
= evsel
->dispatch(base
, tv_p
);
1631 event_debug(("%s: dispatch returned unsuccessfully.",
1637 update_time_cache(base
);
1639 timeout_process(base
);
1641 if (N_ACTIVE_CALLBACKS(base
)) {
1642 int n
= event_process_active(base
);
1643 if ((flags
& EVLOOP_ONCE
)
1644 && N_ACTIVE_CALLBACKS(base
) == 0
1647 } else if (flags
& EVLOOP_NONBLOCK
)
1650 event_debug(("%s: asked to terminate loop.", __func__
));
1653 clear_time_cache(base
);
1654 base
->running_loop
= 0;
1656 EVBASE_RELEASE_LOCK(base
, th_base_lock
);
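/*
 * Usage sketch (illustrative, not part of this file): the common ways to
 * drive the loop above.
 *
 *	event_base_dispatch(base);              // run until no events remain
 *	event_base_loop(base, EVLOOP_ONCE);     // wait once, run what fired
 *	event_base_loop(base, EVLOOP_NONBLOCK); // poll without blocking
 */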
1661 /* Sets up an event for processing once */
1665 void (*cb
)(evutil_socket_t
, short, void *);
1669 /* One-time callback to implement event_base_once: invokes the user callback,
1670 * then deletes the allocated storage */
1672 event_once_cb(evutil_socket_t fd
, short events
, void *arg
)
1674 struct event_once
*eonce
= arg
;
1676 (*eonce
->cb
)(fd
, events
, eonce
->arg
);
1677 event_debug_unassign(&eonce
->ev
);
1681 /* not threadsafe, event scheduled once. */
1683 event_once(evutil_socket_t fd
, short events
,
1684 void (*callback
)(evutil_socket_t
, short, void *),
1685 void *arg
, const struct timeval
*tv
)
1687 return event_base_once(current_base
, fd
, events
, callback
, arg
, tv
);
1690 /* Schedules an event once */
1692 event_base_once(struct event_base
*base
, evutil_socket_t fd
, short events
,
1693 void (*callback
)(evutil_socket_t
, short, void *),
1694 void *arg
, const struct timeval
*tv
)
1696 struct event_once
*eonce
;
1700 /* We cannot support signals that just fire once, or persistent
1702 if (events
& (EV_SIGNAL
|EV_PERSIST
))
1705 if ((eonce
= mm_calloc(1, sizeof(struct event_once
))) == NULL
)
1708 eonce
->cb
= callback
;
1711 if (events
== EV_TIMEOUT
) {
1713 evutil_timerclear(&etv
);
1717 evtimer_assign(&eonce
->ev
, base
, event_once_cb
, eonce
);
1718 } else if (events
& (EV_READ
|EV_WRITE
)) {
1719 events
&= EV_READ
|EV_WRITE
;
1721 event_assign(&eonce
->ev
, base
, fd
, events
, event_once_cb
, eonce
);
1723 /* Bad event combination */
1729 res
= event_add(&eonce
->ev
, tv
);
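/*
 * Usage sketch (illustrative, not part of this file; on_timeout is a
 * placeholder): fire a callback exactly once, two seconds from now, with no
 * struct event to manage:
 *
 *	static void on_timeout(evutil_socket_t fd, short what, void *arg) { ... }
 *	struct timeval two_sec = { 2, 0 };
 *	event_base_once(base, -1, EV_TIMEOUT, on_timeout, NULL, &two_sec);
 */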
1739 event_assign(struct event
*ev
, struct event_base
*base
, evutil_socket_t fd
, short events
, void (*callback
)(evutil_socket_t
, short, void *), void *arg
)
1742 base
= current_base
;
1744 _event_debug_assert_not_added(ev
);
1748 ev
->ev_callback
= callback
;
1751 ev
->ev_events
= events
;
1753 ev
->ev_flags
= EVLIST_INIT
;
1755 ev
->ev_pncalls
= NULL
;
1757 if (events
& EV_SIGNAL
) {
1758 if ((events
& (EV_READ
|EV_WRITE
)) != 0) {
1759 event_warnx("%s: EV_SIGNAL is not compatible with "
1760 "EV_READ or EV_WRITE", __func__
);
1763 ev
->ev_closure
= EV_CLOSURE_SIGNAL
;
1765 if (events
& EV_PERSIST
) {
1766 evutil_timerclear(&ev
->ev_io_timeout
);
1767 ev
->ev_closure
= EV_CLOSURE_PERSIST
;
1769 ev
->ev_closure
= EV_CLOSURE_NONE
;
1773 min_heap_elem_init(ev
);
1776 /* by default, we put new events into the middle priority */
1777 ev
->ev_pri
= base
->nactivequeues
/ 2;
1780 _event_debug_note_setup(ev
);
1786 event_base_set(struct event_base
*base
, struct event
*ev
)
1788 /* Only innocent events may be assigned to a different base */
1789 if (ev
->ev_flags
!= EVLIST_INIT
)
1792 _event_debug_assert_is_setup(ev
);
1795 ev
->ev_pri
= base
->nactivequeues
/2;
1801 event_set(struct event
*ev
, evutil_socket_t fd
, short events
,
1802 void (*callback
)(evutil_socket_t
, short, void *), void *arg
)
1805 r
= event_assign(ev
, current_base
, fd
, events
, callback
, arg
);
1806 EVUTIL_ASSERT(r
== 0);
1810 event_new(struct event_base
*base
, evutil_socket_t fd
, short events
, void (*cb
)(evutil_socket_t
, short, void *), void *arg
)
1813 ev
= mm_malloc(sizeof(struct event
));
1816 if (event_assign(ev
, base
, fd
, events
, cb
, arg
) < 0) {
1825 event_free(struct event
*ev
)
1827 _event_debug_assert_is_setup(ev
);
1829 /* make sure that this event won't be coming back to haunt us. */
1831 _event_debug_note_teardown(ev
);
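/*
 * Usage sketch (illustrative, not part of this file; on_read, sock and ctx
 * are placeholders): the usual allocate/add/free cycle for a persistent
 * read event:
 *
 *	struct event *ev = event_new(base, sock, EV_READ|EV_PERSIST, on_read, ctx);
 *	event_add(ev, NULL);
 *	...
 *	event_free(ev);
 */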
1837 event_debug_unassign(struct event
*ev
)
1839 _event_debug_assert_not_added(ev
);
1840 _event_debug_note_teardown(ev
);
1842 ev
->ev_flags
&= ~EVLIST_INIT
;
 * Sets the priority of an event - if an event is already scheduled
1847 * changing the priority is going to fail.
1851 event_priority_set(struct event
*ev
, int pri
)
1853 _event_debug_assert_is_setup(ev
);
1855 if (ev
->ev_flags
& EVLIST_ACTIVE
)
1857 if (pri
< 0 || pri
>= ev
->ev_base
->nactivequeues
)
1866 * Checks if a specific event is pending or scheduled.
1870 event_pending(const struct event
*ev
, short event
, struct timeval
*tv
)
1874 if (EVUTIL_FAILURE_CHECK(ev
->ev_base
== NULL
)) {
1875 event_warnx("%s: event has no event_base set.", __func__
);
1879 EVBASE_ACQUIRE_LOCK(ev
->ev_base
, th_base_lock
);
1880 _event_debug_assert_is_setup(ev
);
1882 if (ev
->ev_flags
& EVLIST_INSERTED
)
1883 flags
|= (ev
->ev_events
& (EV_READ
|EV_WRITE
|EV_SIGNAL
));
1884 if (ev
->ev_flags
& EVLIST_ACTIVE
)
1885 flags
|= ev
->ev_res
;
1886 if (ev
->ev_flags
& EVLIST_TIMEOUT
)
1887 flags
|= EV_TIMEOUT
;
1889 event
&= (EV_TIMEOUT
|EV_READ
|EV_WRITE
|EV_SIGNAL
);
1891 /* See if there is a timeout that we should report */
1892 if (tv
!= NULL
&& (flags
& event
& EV_TIMEOUT
)) {
1893 struct timeval tmp
= ev
->ev_timeout
;
1894 tmp
.tv_usec
&= MICROSECONDS_MASK
;
1895 #if defined(_EVENT_HAVE_CLOCK_GETTIME) && defined(CLOCK_MONOTONIC)
		/* correctly remap to real time */
1897 evutil_timeradd(&ev
->ev_base
->tv_clock_diff
, &tmp
, tv
);
1903 EVBASE_RELEASE_LOCK(ev
->ev_base
, th_base_lock
);
1905 return (flags
& event
);
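/*
 * Usage sketch (illustrative, not part of this file): check whether a
 * timeout is still pending and when it is due:
 *
 *	struct timeval when;
 *	if (event_pending(ev, EV_TIMEOUT, &when))
 *		;	// scheduled; 'when' holds the expiry time
 */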
1909 event_initialized(const struct event
*ev
)
1911 if (!(ev
->ev_flags
& EVLIST_INIT
))
1918 event_get_assignment(const struct event
*event
, struct event_base
**base_out
, evutil_socket_t
*fd_out
, short *events_out
, event_callback_fn
*callback_out
, void **arg_out
)
1920 _event_debug_assert_is_setup(event
);
1923 *base_out
= event
->ev_base
;
1925 *fd_out
= event
->ev_fd
;
1927 *events_out
= event
->ev_events
;
1929 *callback_out
= event
->ev_callback
;
1931 *arg_out
= event
->ev_arg
;
1935 event_get_struct_event_size(void)
1937 return sizeof(struct event
);
1941 event_get_fd(const struct event
*ev
)
1943 _event_debug_assert_is_setup(ev
);
1948 event_get_base(const struct event
*ev
)
1950 _event_debug_assert_is_setup(ev
);
1955 event_get_events(const struct event
*ev
)
1957 _event_debug_assert_is_setup(ev
);
1958 return ev
->ev_events
;
1962 event_get_callback(const struct event
*ev
)
1964 _event_debug_assert_is_setup(ev
);
1965 return ev
->ev_callback
;
1969 event_get_callback_arg(const struct event
*ev
)
1971 _event_debug_assert_is_setup(ev
);
1976 event_add(struct event
*ev
, const struct timeval
*tv
)
1980 if (EVUTIL_FAILURE_CHECK(!ev
->ev_base
)) {
1981 event_warnx("%s: event has no event_base set.", __func__
);
1985 EVBASE_ACQUIRE_LOCK(ev
->ev_base
, th_base_lock
);
1987 res
= event_add_internal(ev
, tv
, 0);
1989 EVBASE_RELEASE_LOCK(ev
->ev_base
, th_base_lock
);
1994 /* Helper callback: wake an event_base from another thread. This version
1995 * works by writing a byte to one end of a socketpair, so that the event_base
 * listening on the other end will wake up as the corresponding event
 * triggers. */
1999 evthread_notify_base_default(struct event_base
*base
)
2005 r
= send(base
->th_notify_fd
[1], buf
, 1, 0);
2007 r
= write(base
->th_notify_fd
[1], buf
, 1);
2009 return (r
< 0 && errno
!= EAGAIN
) ? -1 : 0;
2012 #if defined(_EVENT_HAVE_EVENTFD) && defined(_EVENT_HAVE_SYS_EVENTFD_H)
2013 /* Helper callback: wake an event_base from another thread. This version
2014 * assumes that you have a working eventfd() implementation. */
2016 evthread_notify_base_eventfd(struct event_base
*base
)
2018 ev_uint64_t msg
= 1;
2021 r
= write(base
->th_notify_fd
[0], (void*) &msg
, sizeof(msg
));
2022 } while (r
< 0 && errno
== EAGAIN
);
2024 return (r
< 0) ? -1 : 0;
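/*
 * Usage note (sketch, not part of this file): none of this wakeup machinery
 * is active unless threading support was initialized before the base was
 * created, e.g.:
 *
 *	evthread_use_pthreads();	// or evthread_use_windows_threads()
 *	struct event_base *base = event_base_new();
 */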
2028 /** Tell the thread currently running the event_loop for base (if any) that it
2029 * needs to stop waiting in its dispatch function (if it is) and process all
2030 * active events and deferred callbacks (if there are any). */
2032 evthread_notify_base(struct event_base
*base
)
2034 EVENT_BASE_ASSERT_LOCKED(base
);
2035 if (!base
->th_notify_fn
)
2037 if (base
->is_notify_pending
)
2039 base
->is_notify_pending
= 1;
2040 return base
->th_notify_fn(base
);
2043 /* Implementation function to add an event. Works just like event_add,
2044 * except: 1) it requires that we have the lock. 2) if tv_is_absolute is set,
2045 * we treat tv as an absolute time, not as an interval to add to the current
2048 event_add_internal(struct event
*ev
, const struct timeval
*tv
,
2051 struct event_base
*base
= ev
->ev_base
;
2055 EVENT_BASE_ASSERT_LOCKED(base
);
2056 _event_debug_assert_is_setup(ev
);
2059 "event_add: event: %p (fd "EV_SOCK_FMT
"), %s%s%scall %p",
2061 EV_SOCK_ARG(ev
->ev_fd
),
2062 ev
->ev_events
& EV_READ
? "EV_READ " : " ",
2063 ev
->ev_events
& EV_WRITE
? "EV_WRITE " : " ",
2064 tv
? "EV_TIMEOUT " : " ",
2067 EVUTIL_ASSERT(!(ev
->ev_flags
& ~EVLIST_ALL
));
2070 * prepare for timeout insertion further below, if we get a
2071 * failure on any step, we should not change any state.
2073 if (tv
!= NULL
&& !(ev
->ev_flags
& EVLIST_TIMEOUT
)) {
2074 if (min_heap_reserve(&base
->timeheap
,
2075 1 + min_heap_size(&base
->timeheap
)) == -1)
2076 return (-1); /* ENOMEM == errno */
2079 /* If the main thread is currently executing a signal event's
2080 * callback, and we are not the main thread, then we want to wait
2081 * until the callback is done before we mess with the event, or else
2082 * we can race on ev_ncalls and ev_pncalls below. */
2083 #ifndef _EVENT_DISABLE_THREAD_SUPPORT
2084 if (base
->current_event
== ev
&& (ev
->ev_events
& EV_SIGNAL
)
2085 && !EVBASE_IN_THREAD(base
)) {
2086 ++base
->current_event_waiters
;
2087 EVTHREAD_COND_WAIT(base
->current_event_cond
, base
->th_base_lock
);
2091 if ((ev
->ev_events
& (EV_READ
|EV_WRITE
|EV_SIGNAL
)) &&
2092 !(ev
->ev_flags
& (EVLIST_INSERTED
|EVLIST_ACTIVE
))) {
2093 if (ev
->ev_events
& (EV_READ
|EV_WRITE
))
2094 res
= evmap_io_add(base
, ev
->ev_fd
, ev
);
2095 else if (ev
->ev_events
& EV_SIGNAL
)
2096 res
= evmap_signal_add(base
, (int)ev
->ev_fd
, ev
);
2098 event_queue_insert(base
, ev
, EVLIST_INSERTED
);
2100 /* evmap says we need to notify the main thread. */
2107 * we should change the timeout state only if the previous event
2108 * addition succeeded.
2110 if (res
!= -1 && tv
!= NULL
) {
2115 * for persistent timeout events, we remember the
2116 * timeout value and re-add the event.
2118 * If tv_is_absolute, this was already set.
2120 if (ev
->ev_closure
== EV_CLOSURE_PERSIST
&& !tv_is_absolute
)
2121 ev
->ev_io_timeout
= *tv
;
2124 * we already reserved memory above for the case where we
2125 * are not replacing an existing timeout.
2127 if (ev
->ev_flags
& EVLIST_TIMEOUT
) {
2128 /* XXX I believe this is needless. */
2129 if (min_heap_elt_is_top(ev
))
2131 event_queue_remove(base
, ev
, EVLIST_TIMEOUT
);
2134 /* Check if it is active due to a timeout. Rescheduling
2135 * this timeout before the callback can be executed
2136 * removes it from the active list. */
2137 if ((ev
->ev_flags
& EVLIST_ACTIVE
) &&
2138 (ev
->ev_res
& EV_TIMEOUT
)) {
2139 if (ev
->ev_events
& EV_SIGNAL
) {
2140 /* See if we are just active executing
2141 * this event in a loop
2143 if (ev
->ev_ncalls
&& ev
->ev_pncalls
) {
2145 *ev
->ev_pncalls
= 0;
2149 event_queue_remove(base
, ev
, EVLIST_ACTIVE
);
2152 gettime(base
, &now
);
2154 common_timeout
= is_common_timeout(tv
, base
);
2155 if (tv_is_absolute
) {
2156 ev
->ev_timeout
= *tv
;
2157 } else if (common_timeout
) {
2158 struct timeval tmp
= *tv
;
2159 tmp
.tv_usec
&= MICROSECONDS_MASK
;
2160 evutil_timeradd(&now
, &tmp
, &ev
->ev_timeout
);
2161 ev
->ev_timeout
.tv_usec
|=
2162 (tv
->tv_usec
& ~MICROSECONDS_MASK
);
2164 evutil_timeradd(&now
, tv
, &ev
->ev_timeout
);
2168 "event_add: timeout in %d seconds, call %p",
2169 (int)tv
->tv_sec
, ev
->ev_callback
));
2171 event_queue_insert(base
, ev
, EVLIST_TIMEOUT
);
2172 if (common_timeout
) {
2173 struct common_timeout_list
*ctl
=
2174 get_common_timeout_list(base
, &ev
->ev_timeout
);
2175 if (ev
== TAILQ_FIRST(&ctl
->events
)) {
2176 common_timeout_schedule(ctl
, &now
, ev
);
2179 /* See if the earliest timeout is now earlier than it
2180 * was before: if so, we will need to tell the main
		 * thread to wake up earlier than it would otherwise. */
2183 if (min_heap_elt_is_top(ev
))
2188 /* if we are not in the right thread, we need to wake up the loop */
2189 if (res
!= -1 && notify
&& EVBASE_NEED_NOTIFY(base
))
2190 evthread_notify_base(base
);
2192 _event_debug_note_add(ev
);
2198 event_del(struct event
*ev
)
2202 if (EVUTIL_FAILURE_CHECK(!ev
->ev_base
)) {
2203 event_warnx("%s: event has no event_base set.", __func__
);
2207 EVBASE_ACQUIRE_LOCK(ev
->ev_base
, th_base_lock
);
2209 res
= event_del_internal(ev
);
2211 EVBASE_RELEASE_LOCK(ev
->ev_base
, th_base_lock
);
2216 /* Helper for event_del: always called with th_base_lock held. */
2218 event_del_internal(struct event
*ev
)
2220 struct event_base
*base
;
2221 int res
= 0, notify
= 0;
2223 event_debug(("event_del: %p (fd "EV_SOCK_FMT
"), callback %p",
2224 ev
, EV_SOCK_ARG(ev
->ev_fd
), ev
->ev_callback
));
2226 /* An event without a base has not been added */
2227 if (ev
->ev_base
== NULL
)
2230 EVENT_BASE_ASSERT_LOCKED(ev
->ev_base
);
2232 /* If the main thread is currently executing this event's callback,
2233 * and we are not the main thread, then we want to wait until the
2234 * callback is done before we start removing the event. That way,
2235 * when this function returns, it will be safe to free the
2236 * user-supplied argument. */
2238 #ifndef _EVENT_DISABLE_THREAD_SUPPORT
2239 if (base
->current_event
== ev
&& !EVBASE_IN_THREAD(base
)) {
2240 ++base
->current_event_waiters
;
2241 EVTHREAD_COND_WAIT(base
->current_event_cond
, base
->th_base_lock
);
2245 EVUTIL_ASSERT(!(ev
->ev_flags
& ~EVLIST_ALL
));
2247 /* See if we are just active executing this event in a loop */
2248 if (ev
->ev_events
& EV_SIGNAL
) {
2249 if (ev
->ev_ncalls
&& ev
->ev_pncalls
) {
2251 *ev
->ev_pncalls
= 0;
2255 if (ev
->ev_flags
& EVLIST_TIMEOUT
) {
2256 /* NOTE: We never need to notify the main thread because of a
2257 * deleted timeout event: all that could happen if we don't is
2258 * that the dispatch loop might wake up too early. But the
2259 * point of notifying the main thread _is_ to wake up the
2260 * dispatch loop early anyway, so we wouldn't gain anything by
2263 event_queue_remove(base
, ev
, EVLIST_TIMEOUT
);
2266 if (ev
->ev_flags
& EVLIST_ACTIVE
)
2267 event_queue_remove(base
, ev
, EVLIST_ACTIVE
);
2269 if (ev
->ev_flags
& EVLIST_INSERTED
) {
2270 event_queue_remove(base
, ev
, EVLIST_INSERTED
);
2271 if (ev
->ev_events
& (EV_READ
|EV_WRITE
))
2272 res
= evmap_io_del(base
, ev
->ev_fd
, ev
);
2274 res
= evmap_signal_del(base
, (int)ev
->ev_fd
, ev
);
2276 /* evmap says we need to notify the main thread. */
2282 /* if we are not in the right thread, we need to wake up the loop */
2283 if (res
!= -1 && notify
&& EVBASE_NEED_NOTIFY(base
))
2284 evthread_notify_base(base
);
2286 _event_debug_note_del(ev
);
2292 event_active(struct event
*ev
, int res
, short ncalls
)
2294 if (EVUTIL_FAILURE_CHECK(!ev
->ev_base
)) {
2295 event_warnx("%s: event has no event_base set.", __func__
);
2299 EVBASE_ACQUIRE_LOCK(ev
->ev_base
, th_base_lock
);
2301 _event_debug_assert_is_setup(ev
);
2303 event_active_nolock(ev
, res
, ncalls
);
2305 EVBASE_RELEASE_LOCK(ev
->ev_base
, th_base_lock
);
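/*
 * Usage sketch (illustrative, not part of this file): make a pending event
 * run as if its conditions had triggered, e.g. from another thread:
 *
 *	event_active(ev, EV_READ, 0);
 */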
2310 event_active_nolock(struct event
*ev
, int res
, short ncalls
)
2312 struct event_base
*base
;
2314 event_debug(("event_active: %p (fd "EV_SOCK_FMT
"), res %d, callback %p",
2315 ev
, EV_SOCK_ARG(ev
->ev_fd
), (int)res
, ev
->ev_callback
));
2318 /* We get different kinds of events, add them together */
2319 if (ev
->ev_flags
& EVLIST_ACTIVE
) {
2326 EVENT_BASE_ASSERT_LOCKED(base
);
2330 if (ev
->ev_pri
< base
->event_running_priority
)
2331 base
->event_continue
= 1;
2333 if (ev
->ev_events
& EV_SIGNAL
) {
2334 #ifndef _EVENT_DISABLE_THREAD_SUPPORT
2335 if (base
->current_event
== ev
&& !EVBASE_IN_THREAD(base
)) {
2336 ++base
->current_event_waiters
;
2337 EVTHREAD_COND_WAIT(base
->current_event_cond
, base
->th_base_lock
);
2340 ev
->ev_ncalls
= ncalls
;
2341 ev
->ev_pncalls
= NULL
;
2344 event_queue_insert(base
, ev
, EVLIST_ACTIVE
);
2346 if (EVBASE_NEED_NOTIFY(base
))
2347 evthread_notify_base(base
);
void
event_deferred_cb_init(struct deferred_cb *cb, deferred_cb_fn fn, void *arg)
{
    memset(cb, 0, sizeof(struct deferred_cb));
    cb->cb = fn;
    cb->arg = arg;
}

void
event_deferred_cb_cancel(struct deferred_cb_queue *queue,
    struct deferred_cb *cb)
{
    if (!queue) {
        if (current_base)
            queue = &current_base->defer_queue;
        else
            return;
    }

    LOCK_DEFERRED_QUEUE(queue);
    if (cb->queued) {
        TAILQ_REMOVE(&queue->deferred_cb_list, cb, cb_next);
        --queue->active_count;
        cb->queued = 0;
    }
    UNLOCK_DEFERRED_QUEUE(queue);
}

void
event_deferred_cb_schedule(struct deferred_cb_queue *queue,
    struct deferred_cb *cb)
{
    if (!queue) {
        if (current_base)
            queue = &current_base->defer_queue;
        else
            return;
    }

    LOCK_DEFERRED_QUEUE(queue);
    if (!cb->queued) {
        cb->queued = 1;
        TAILQ_INSERT_TAIL(&queue->deferred_cb_list, cb, cb_next);
        ++queue->active_count;
        if (queue->notify_fn)
            queue->notify_fn(queue, queue->notify_arg);
    }
    UNLOCK_DEFERRED_QUEUE(queue);
}
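/*
 * Illustrative sketch (not part of this file): how libevent-internal code
 * (for example bufferevents) typically uses the deferred-callback helpers
 * above.  It assumes event_base_get_deferred_cb_queue() is available for the
 * base; the callback and function names are hypothetical.
 */
#if 0
static void
flush_deferred(struct deferred_cb *cb, void *arg)
{
    /* Runs later from the event loop, not from the scheduler's stack. */
}

static void
setup_deferred(struct event_base *base, struct deferred_cb *cb, void *arg)
{
    struct deferred_cb_queue *q = event_base_get_deferred_cb_queue(base);

    event_deferred_cb_init(cb, flush_deferred, arg);
    /* Scheduling is idempotent: a cb already on the queue is not re-queued. */
    event_deferred_cb_schedule(q, cb);
}
#endif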
static int
timeout_next(struct event_base *base, struct timeval **tv_p)
{
    /* Caller must hold th_base_lock */
    struct timeval now;
    struct event *ev;
    struct timeval *tv = *tv_p;
    int res = 0;

    ev = min_heap_top(&base->timeheap);

    if (ev == NULL) {
        /* if no time-based events are active wait for I/O */
        *tv_p = NULL;
        goto out;
    }

    if (gettime(base, &now) == -1) {
        res = -1;
        goto out;
    }

    if (evutil_timercmp(&ev->ev_timeout, &now, <=)) {
        evutil_timerclear(tv);
        goto out;
    }

    evutil_timersub(&ev->ev_timeout, &now, tv);

    EVUTIL_ASSERT(tv->tv_sec >= 0);
    EVUTIL_ASSERT(tv->tv_usec >= 0);
    event_debug(("timeout_next: in %d seconds", (int)tv->tv_sec));

out:
    return (res);
}
/*
 * Determines if the time is running backwards by comparing the current time
 * against the last time we checked.  Not needed when using clock monotonic.
 * If time is running backwards, we adjust the firing time of every event by
 * the amount that time seems to have jumped.
 */
static void
timeout_correct(struct event_base *base, struct timeval *tv)
{
    /* Caller must hold th_base_lock. */
    struct event **pev;
    unsigned int size;
    struct timeval off;
    int i;

    if (use_monotonic)
        return;

    /* Check if time is running backwards */
    gettime(base, tv);

    if (evutil_timercmp(tv, &base->event_tv, >=)) {
        base->event_tv = *tv;
        return;
    }

    event_debug(("%s: time is running backwards, corrected",
        __func__));
    evutil_timersub(&base->event_tv, tv, &off);

    /*
     * We can modify the key element of the node without destroying
     * the minheap property, because we change every element.
     */
    pev = base->timeheap.p;
    size = base->timeheap.n;
    for (; size-- > 0; ++pev) {
        struct timeval *ev_tv = &(**pev).ev_timeout;
        evutil_timersub(ev_tv, &off, ev_tv);
    }
    for (i = 0; i < base->n_common_timeouts; ++i) {
        struct event *ev;
        struct common_timeout_list *ctl =
            base->common_timeout_queues[i];
        TAILQ_FOREACH(ev, &ctl->events,
            ev_timeout_pos.ev_next_with_common_timeout) {
            struct timeval *ev_tv = &ev->ev_timeout;
            ev_tv->tv_usec &= MICROSECONDS_MASK;
            evutil_timersub(ev_tv, &off, ev_tv);
            ev_tv->tv_usec |= COMMON_TIMEOUT_MAGIC |
                (i << COMMON_TIMEOUT_IDX_SHIFT);
        }
    }

    /* Now remember what the new time turned out to be. */
    base->event_tv = *tv;
}
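/*
 * Illustrative sketch (not part of this file): the adjustment performed by
 * timeout_correct() in concrete numbers.  If the wall clock jumps back,
 * every pending deadline is pulled back by the same offset, so relative
 * timeouts keep their remaining duration.  Values are hypothetical.
 */
#if 0
static void
timeout_correct_example(void)
{
    struct timeval old_now   = { 1000, 0 };  /* last observed time */
    struct timeval new_now   = {  990, 0 };  /* clock jumped back 10s */
    struct timeval deadline  = { 1005, 0 };  /* was due in 5s */
    struct timeval off;

    evutil_timersub(&old_now, &new_now, &off);      /* off = 10s */
    evutil_timersub(&deadline, &off, &deadline);    /* deadline = 995: still 5s away */
}
#endif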
/* Activate every event whose timeout has elapsed. */
static void
timeout_process(struct event_base *base)
{
    /* Caller must hold lock. */
    struct timeval now;
    struct event *ev;

    if (min_heap_empty(&base->timeheap)) {
        return;
    }

    gettime(base, &now);

    while ((ev = min_heap_top(&base->timeheap))) {
        if (evutil_timercmp(&ev->ev_timeout, &now, >))
            break;

        /* delete this event from the I/O queues */
        event_del_internal(ev);

        event_debug(("timeout_process: call %p",
            ev->ev_callback));
        event_active_nolock(ev, EV_TIMEOUT, 1);
    }
}
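/*
 * Illustrative sketch (not part of this file): from the public API, a
 * timeout handled by the loop above is just an ordinary timer event.
 * Names are hypothetical; the event is assumed to be freed elsewhere.
 */
#if 0
static void
on_tick(evutil_socket_t fd, short what, void *arg)
{
    /* 'what' is EV_TIMEOUT here, set by event_active_nolock() above. */
}

static void
add_one_shot_timer(struct event_base *base)
{
    struct timeval one_sec = { 1, 0 };
    struct event *ev = evtimer_new(base, on_tick, NULL);

    evtimer_add(ev, &one_sec);
}
#endif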
/* Remove 'ev' from 'queue' (EVLIST_...) in base. */
static void
event_queue_remove(struct event_base *base, struct event *ev, int queue)
{
    EVENT_BASE_ASSERT_LOCKED(base);

    if (!(ev->ev_flags & queue)) {
        event_errx(1, "%s: %p(fd "EV_SOCK_FMT") not on queue %x", __func__,
            ev, EV_SOCK_ARG(ev->ev_fd), queue);
        return;
    }

    if (~ev->ev_flags & EVLIST_INTERNAL)
        base->event_count--;

    ev->ev_flags &= ~queue;
    switch (queue) {
    case EVLIST_INSERTED:
        TAILQ_REMOVE(&base->eventqueue, ev, ev_next);
        break;
    case EVLIST_ACTIVE:
        base->event_count_active--;
        TAILQ_REMOVE(&base->activequeues[ev->ev_pri],
            ev, ev_active_next);
        break;
    case EVLIST_TIMEOUT:
        if (is_common_timeout(&ev->ev_timeout, base)) {
            struct common_timeout_list *ctl =
                get_common_timeout_list(base, &ev->ev_timeout);
            TAILQ_REMOVE(&ctl->events, ev,
                ev_timeout_pos.ev_next_with_common_timeout);
        } else {
            min_heap_erase(&base->timeheap, ev);
        }
        break;
    default:
        event_errx(1, "%s: unknown queue %x", __func__, queue);
    }
}
/* Add 'ev' to the common timeout list in 'ev'. */
static void
insert_common_timeout_inorder(struct common_timeout_list *ctl,
    struct event *ev)
{
    struct event *e;
    /* By all logic, we should just be able to append 'ev' to the end of
     * ctl->events, since the timeout on each 'ev' is set to {the common
     * timeout} + {the time when we add the event}, and so the events
     * should arrive in order of their timeouts.  But just in case
     * there's some wacky threading issue going on, we do a search from
     * the end of 'ev' to find the right insertion point.
     */
    TAILQ_FOREACH_REVERSE(e, &ctl->events,
        event_list, ev_timeout_pos.ev_next_with_common_timeout) {
        /* This timercmp is a little sneaky, since both ev and e have
         * magic values in tv_usec.  Fortunately, they ought to have
         * the _same_ magic values in tv_usec.  Let's assert for that.
         */
        EVUTIL_ASSERT(
            is_same_common_timeout(&e->ev_timeout, &ev->ev_timeout));
        if (evutil_timercmp(&ev->ev_timeout, &e->ev_timeout, >=)) {
            TAILQ_INSERT_AFTER(&ctl->events, e, ev,
                ev_timeout_pos.ev_next_with_common_timeout);
            return;
        }
    }
    TAILQ_INSERT_HEAD(&ctl->events, ev,
        ev_timeout_pos.ev_next_with_common_timeout);
}
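/*
 * Illustrative sketch (not part of this file): common timeouts are created
 * through the public event_base_init_common_timeout() API.  Events added
 * with the returned magic-tagged timeval land on the per-duration list
 * managed above instead of in the min-heap, which is intended to be cheaper
 * when many events share the same duration.  Names are hypothetical.
 */
#if 0
static void
add_with_common_timeout(struct event_base *base, struct event *ev)
{
    struct timeval ten_sec = { 10, 0 };
    const struct timeval *common;

    common = event_base_init_common_timeout(base, &ten_sec);
    event_add(ev, common);
}
#endif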
static void
event_queue_insert(struct event_base *base, struct event *ev, int queue)
{
    EVENT_BASE_ASSERT_LOCKED(base);

    if (ev->ev_flags & queue) {
        /* Double insertion is possible for active events */
        if (queue & EVLIST_ACTIVE)
            return;

        event_errx(1, "%s: %p(fd "EV_SOCK_FMT") already on queue %x", __func__,
            ev, EV_SOCK_ARG(ev->ev_fd), queue);
        return;
    }

    if (~ev->ev_flags & EVLIST_INTERNAL)
        base->event_count++;

    ev->ev_flags |= queue;
    switch (queue) {
    case EVLIST_INSERTED:
        TAILQ_INSERT_TAIL(&base->eventqueue, ev, ev_next);
        break;
    case EVLIST_ACTIVE:
        base->event_count_active++;
        TAILQ_INSERT_TAIL(&base->activequeues[ev->ev_pri],
            ev, ev_active_next);
        break;
    case EVLIST_TIMEOUT: {
        if (is_common_timeout(&ev->ev_timeout, base)) {
            struct common_timeout_list *ctl =
                get_common_timeout_list(base, &ev->ev_timeout);
            insert_common_timeout_inorder(ctl, ev);
        } else
            min_heap_push(&base->timeheap, ev);
        break;
    }
    default:
        event_errx(1, "%s: unknown queue %x", __func__, queue);
    }
}
/* Functions for debugging */

const char *
event_get_version(void)
{
    return (_EVENT_VERSION);
}

ev_uint32_t
event_get_version_number(void)
{
    return (_EVENT_NUMERIC_VERSION);
}

/*
 * No thread-safe interface needed - the information should be the same
 * for all threads.
 */

const char *
event_get_method(void)
{
    return (current_base->evsel->name);
}
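/*
 * Illustrative sketch (not part of this file): typical startup logging with
 * the accessors above.  Note that event_get_method() reads current_base, so
 * it is only meaningful after event_init() has set up the default base;
 * event_base_new() alone does not set current_base.
 */
#if 0
static void
log_libevent_info(void)
{
    printf("libevent %s (0x%08x), backend: %s\n",
        event_get_version(),
        (unsigned)event_get_version_number(),
        event_get_method());
}
#endif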
#ifndef _EVENT_DISABLE_MM_REPLACEMENT
static void *(*_mm_malloc_fn)(size_t sz) = NULL;
static void *(*_mm_realloc_fn)(void *p, size_t sz) = NULL;
static void (*_mm_free_fn)(void *p) = NULL;

void *
event_mm_malloc_(size_t sz)
{
    if (_mm_malloc_fn)
        return _mm_malloc_fn(sz);
    else
        return malloc(sz);
}

void *
event_mm_calloc_(size_t count, size_t size)
{
    if (_mm_malloc_fn) {
        size_t sz = count * size;
        void *p = _mm_malloc_fn(sz);
        if (p)
            memset(p, 0, sz);
        return p;
    } else
        return calloc(count, size);
}

char *
event_mm_strdup_(const char *str)
{
    if (_mm_malloc_fn) {
        size_t ln = strlen(str);
        void *p = _mm_malloc_fn(ln+1);
        if (p)
            memcpy(p, str, ln+1);
        return p;
    } else
#ifdef WIN32
        return _strdup(str);
#else
        return strdup(str);
#endif
}

void *
event_mm_realloc_(void *ptr, size_t sz)
{
    if (_mm_realloc_fn)
        return _mm_realloc_fn(ptr, sz);
    else
        return realloc(ptr, sz);
}

void
event_mm_free_(void *ptr)
{
    if (_mm_free_fn)
        _mm_free_fn(ptr);
    else
        free(ptr);
}

void
event_set_mem_functions(void *(*malloc_fn)(size_t sz),
    void *(*realloc_fn)(void *ptr, size_t sz),
    void (*free_fn)(void *ptr))
{
    _mm_malloc_fn = malloc_fn;
    _mm_realloc_fn = realloc_fn;
    _mm_free_fn = free_fn;
}
#endif
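/*
 * Illustrative sketch (not part of this file): replacing the allocators via
 * event_set_mem_functions() with wrappers that count malloc calls (only
 * malloc is instrumented here, for brevity).  This must happen before any
 * other libevent call, since memory obtained from one allocator would
 * otherwise be released by another.  Names are hypothetical.
 */
#if 0
static size_t total_allocs;

static void *
counting_malloc(size_t sz)
{
    ++total_allocs;
    return malloc(sz);
}

static void *
plain_realloc(void *p, size_t sz)
{
    return realloc(p, sz);
}

static void
plain_free(void *p)
{
    free(p);
}

static void
install_counting_allocators(void)
{
    event_set_mem_functions(counting_malloc, plain_realloc, plain_free);
}
#endif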
#if defined(_EVENT_HAVE_EVENTFD) && defined(_EVENT_HAVE_SYS_EVENTFD_H)
static void
evthread_notify_drain_eventfd(evutil_socket_t fd, short what, void *arg)
{
    ev_uint64_t msg;
    ev_ssize_t r;
    struct event_base *base = arg;

    r = read(fd, (void*) &msg, sizeof(msg));
    if (r < 0 && errno != EAGAIN) {
        event_sock_warn(fd, "Error reading from eventfd");
    }
    EVBASE_ACQUIRE_LOCK(base, th_base_lock);
    base->is_notify_pending = 0;
    EVBASE_RELEASE_LOCK(base, th_base_lock);
}
#endif

static void
evthread_notify_drain_default(evutil_socket_t fd, short what, void *arg)
{
    unsigned char buf[1024];
    struct event_base *base = arg;
#ifdef WIN32
    while (recv(fd, (char*)buf, sizeof(buf), 0) > 0)
        ;
#else
    while (read(fd, (char*)buf, sizeof(buf)) > 0)
        ;
#endif

    EVBASE_ACQUIRE_LOCK(base, th_base_lock);
    base->is_notify_pending = 0;
    EVBASE_RELEASE_LOCK(base, th_base_lock);
}
int
evthread_make_base_notifiable(struct event_base *base)
{
    void (*cb)(evutil_socket_t, short, void *) = evthread_notify_drain_default;
    int (*notify)(struct event_base *) = evthread_notify_base_default;

    /* XXXX grab the lock here? */
    if (!base)
        return -1;

    if (base->th_notify_fd[0] >= 0)
        return 0;

#if defined(_EVENT_HAVE_EVENTFD) && defined(_EVENT_HAVE_SYS_EVENTFD_H)
#ifndef EFD_CLOEXEC
#define EFD_CLOEXEC 0
#endif
    base->th_notify_fd[0] = eventfd(0, EFD_CLOEXEC);
    if (base->th_notify_fd[0] >= 0) {
        evutil_make_socket_closeonexec(base->th_notify_fd[0]);
        notify = evthread_notify_base_eventfd;
        cb = evthread_notify_drain_eventfd;
    }
#endif
#if defined(_EVENT_HAVE_PIPE)
    if (base->th_notify_fd[0] < 0) {
        if ((base->evsel->features & EV_FEATURE_FDS)) {
            if (pipe(base->th_notify_fd) < 0) {
                event_warn("%s: pipe", __func__);
            } else {
                evutil_make_socket_closeonexec(base->th_notify_fd[0]);
                evutil_make_socket_closeonexec(base->th_notify_fd[1]);
            }
        }
    }
#endif

#ifdef WIN32
#define LOCAL_SOCKETPAIR_AF AF_INET
#else
#define LOCAL_SOCKETPAIR_AF AF_UNIX
#endif
    if (base->th_notify_fd[0] < 0) {
        if (evutil_socketpair(LOCAL_SOCKETPAIR_AF, SOCK_STREAM, 0,
            base->th_notify_fd) == -1) {
            event_sock_warn(-1, "%s: socketpair", __func__);
            return (-1);
        } else {
            evutil_make_socket_closeonexec(base->th_notify_fd[0]);
            evutil_make_socket_closeonexec(base->th_notify_fd[1]);
        }
    }

    evutil_make_socket_nonblocking(base->th_notify_fd[0]);

    base->th_notify_fn = notify;

    /*
      Making the second socket nonblocking is a bit subtle, given that we
      ignore any EAGAIN returns when writing to it, and you don't usually
      do that for a nonblocking socket. But if the kernel gives us EAGAIN,
      then there's no need to add any more data to the buffer, since
      the main thread is already either about to wake up and drain it,
      or woken up and in the process of draining it.
    */
    if (base->th_notify_fd[1] > 0)
        evutil_make_socket_nonblocking(base->th_notify_fd[1]);

    /* prepare an event that we can use for wakeup */
    event_assign(&base->th_notify, base, base->th_notify_fd[0],
        EV_READ|EV_PERSIST, cb, base);

    /* we need to mark this as internal event */
    base->th_notify.ev_flags |= EVLIST_INTERNAL;
    event_priority_set(&base->th_notify, 0);

    return event_add(&base->th_notify, NULL);
}
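/*
 * Illustrative sketch (not part of this file): applications normally do not
 * call evthread_make_base_notifiable() themselves.  Enabling the threading
 * callbacks before creating the base, here with evthread_use_pthreads()
 * (which requires linking against libevent_pthreads), is enough; the base is
 * then made notifiable as part of its setup.  Names are hypothetical.
 */
#if 0
static struct event_base *
make_threaded_base(void)
{
    if (evthread_use_pthreads() < 0)
        return NULL;
    return event_base_new();
}
#endif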
void
event_base_dump_events(struct event_base *base, FILE *output)
{
    struct event *e;
    int i;
    fprintf(output, "Inserted events:\n");
    TAILQ_FOREACH(e, &base->eventqueue, ev_next) {
        fprintf(output, "  %p [fd "EV_SOCK_FMT"]%s%s%s%s%s\n",
            (void*)e, EV_SOCK_ARG(e->ev_fd),
            (e->ev_events&EV_READ)?" Read":"",
            (e->ev_events&EV_WRITE)?" Write":"",
            (e->ev_events&EV_SIGNAL)?" Signal":"",
            (e->ev_events&EV_TIMEOUT)?" Timeout":"",
            (e->ev_events&EV_PERSIST)?" Persist":"");
    }
    for (i = 0; i < base->nactivequeues; ++i) {
        if (TAILQ_EMPTY(&base->activequeues[i]))
            continue;
        fprintf(output, "Active events [priority %d]:\n", i);
        TAILQ_FOREACH(e, &base->eventqueue, ev_next) {
            fprintf(output, "  %p [fd "EV_SOCK_FMT"]%s%s%s%s\n",
                (void*)e, EV_SOCK_ARG(e->ev_fd),
                (e->ev_res&EV_READ)?" Read active":"",
                (e->ev_res&EV_WRITE)?" Write active":"",
                (e->ev_res&EV_SIGNAL)?" Signal active":"",
                (e->ev_res&EV_TIMEOUT)?" Timeout active":"");
        }
    }
}
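/*
 * Illustrative sketch (not part of this file): dumping the event lists to
 * stderr, for example from a SIGUSR1 handler while debugging a stuck loop.
 * The handler and function names are hypothetical.
 */
#if 0
static void
on_sigusr1(evutil_socket_t sig, short what, void *arg)
{
    struct event_base *base = arg;
    event_base_dump_events(base, stderr);
}

static void
install_dump_handler(struct event_base *base)
{
    struct event *ev = evsignal_new(base, SIGUSR1, on_sigusr1, base);
    event_add(ev, NULL);
}
#endif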
void
event_base_add_virtual(struct event_base *base)
{
    EVBASE_ACQUIRE_LOCK(base, th_base_lock);
    base->virtual_event_count++;
    EVBASE_RELEASE_LOCK(base, th_base_lock);
}

void
event_base_del_virtual(struct event_base *base)
{
    EVBASE_ACQUIRE_LOCK(base, th_base_lock);
    EVUTIL_ASSERT(base->virtual_event_count > 0);
    base->virtual_event_count--;
    if (base->virtual_event_count == 0 && EVBASE_NEED_NOTIFY(base))
        evthread_notify_base(base);
    EVBASE_RELEASE_LOCK(base, th_base_lock);
}
#ifndef _EVENT_DISABLE_THREAD_SUPPORT
int
event_global_setup_locks_(const int enable_locks)
{
#ifndef _EVENT_DISABLE_DEBUG_MODE
    EVTHREAD_SETUP_GLOBAL_LOCK(_event_debug_map_lock, 0);
#endif
    if (evsig_global_setup_locks_(enable_locks) < 0)
        return -1;
    if (evutil_secure_rng_global_setup_locks_(enable_locks) < 0)
        return -1;
    return 0;
}
#endif
void
event_base_assert_ok(struct event_base *base)
{
    int i;
    EVBASE_ACQUIRE_LOCK(base, th_base_lock);
    evmap_check_integrity(base);

    /* Check the heap property */
    for (i = 1; i < (int)base->timeheap.n; ++i) {
        int parent = (i - 1) / 2;
        struct event *ev, *p_ev;
        ev = base->timeheap.p[i];
        p_ev = base->timeheap.p[parent];
        EVUTIL_ASSERT(ev->ev_flags & EV_TIMEOUT);
        EVUTIL_ASSERT(evutil_timercmp(&p_ev->ev_timeout, &ev->ev_timeout, <=));
        EVUTIL_ASSERT(ev->ev_timeout_pos.min_heap_idx == i);
    }

    /* Check that the common timeouts are fine */
    for (i = 0; i < base->n_common_timeouts; ++i) {
        struct common_timeout_list *ctl = base->common_timeout_queues[i];
        struct event *last = NULL, *ev;
        TAILQ_FOREACH(ev, &ctl->events, ev_timeout_pos.ev_next_with_common_timeout) {
            if (last)
                EVUTIL_ASSERT(evutil_timercmp(&last->ev_timeout, &ev->ev_timeout, <=));
            EVUTIL_ASSERT(ev->ev_flags & EV_TIMEOUT);
            EVUTIL_ASSERT(is_common_timeout(&ev->ev_timeout, base));
            EVUTIL_ASSERT(COMMON_TIMEOUT_IDX(&ev->ev_timeout) == i);
            last = ev;
        }
    }

    EVBASE_RELEASE_LOCK(base, th_base_lock);
}