/*
 * Copyright (c) 2000-2007 Niels Provos <provos@citi.umich.edu>
 * Copyright (c) 2007-2012 Niels Provos and Nick Mathewson
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include "event2/event-config.h"
#include "evconfig-private.h"

#define WIN32_LEAN_AND_MEAN
#undef WIN32_LEAN_AND_MEAN

#include <sys/types.h>
#if !defined(_WIN32) && defined(EVENT__HAVE_SYS_TIME_H)
#include <sys/queue.h>
#ifdef EVENT__HAVE_SYS_SOCKET_H
#include <sys/socket.h>
#ifdef EVENT__HAVE_UNISTD_H

#include "event2/event.h"
#include "event2/event_struct.h"
#include "event2/event_compat.h"
#include "event-internal.h"
#include "defer-internal.h"
#include "evthread-internal.h"
#include "event2/thread.h"
#include "event2/util.h"
#include "log-internal.h"
#include "evmap-internal.h"
#include "iocp-internal.h"
#include "changelist-internal.h"
#define HT_NO_CACHE_HASH_VALUES
#include "ht-internal.h"
#include "util-internal.h"
#ifdef EVENT__HAVE_WORKING_KQUEUE
#include "kqueue-internal.h"
#endif

#ifdef EVENT__HAVE_EVENT_PORTS
extern const struct eventop evportops;
#endif
#ifdef EVENT__HAVE_SELECT
extern const struct eventop selectops;
#endif
#ifdef EVENT__HAVE_POLL
extern const struct eventop pollops;
#endif
#ifdef EVENT__HAVE_EPOLL
extern const struct eventop epollops;
#endif
#ifdef EVENT__HAVE_WORKING_KQUEUE
extern const struct eventop kqops;
#endif
#ifdef EVENT__HAVE_DEVPOLL
extern const struct eventop devpollops;
#endif
extern const struct eventop win32ops;

/* Array of backends in order of preference. */
static const struct eventop *eventops[] = {
#ifdef EVENT__HAVE_EVENT_PORTS
    &evportops,
#endif
#ifdef EVENT__HAVE_WORKING_KQUEUE
    &kqops,
#endif
#ifdef EVENT__HAVE_EPOLL
    &epollops,
#endif
#ifdef EVENT__HAVE_DEVPOLL
    &devpollops,
#endif
#ifdef EVENT__HAVE_POLL
    &pollops,
#endif
#ifdef EVENT__HAVE_SELECT
    &selectops,
#endif
    NULL
};
/* Global state; deprecated */
struct event_base *event_global_current_base_ = NULL;
#define current_base event_global_current_base_

static void *event_self_cbarg_ptr_ = NULL;

static void event_queue_insert_active(struct event_base *, struct event_callback *);
static void event_queue_insert_active_later(struct event_base *, struct event_callback *);
static void event_queue_insert_timeout(struct event_base *, struct event *);
static void event_queue_insert_inserted(struct event_base *, struct event *);
static void event_queue_remove_active(struct event_base *, struct event_callback *);
static void event_queue_remove_active_later(struct event_base *, struct event_callback *);
static void event_queue_remove_timeout(struct event_base *, struct event *);
static void event_queue_remove_inserted(struct event_base *, struct event *);
static void event_queue_make_later_events_active(struct event_base *base);

static int evthread_make_base_notifiable_nolock_(struct event_base *base);
static int event_del_(struct event *ev, int blocking);
#ifdef USE_REINSERT_TIMEOUT
/* This code seems buggy; only turn it on if we find out what the trouble is. */
static void event_queue_reinsert_timeout(struct event_base *, struct event *,
    int was_common, int is_common, int old_timeout_idx);
#endif

static int event_haveevents(struct event_base *);

static int event_process_active(struct event_base *);

static int timeout_next(struct event_base *, struct timeval **);
static void timeout_process(struct event_base *);

static inline void event_signal_closure(struct event_base *, struct event *ev);
static inline void event_persist_closure(struct event_base *, struct event *ev);

static int evthread_notify_base(struct event_base *base);

static void insert_common_timeout_inorder(struct common_timeout_list *ctl,
    struct event *ev);
#ifndef EVENT__DISABLE_DEBUG_MODE
/* These functions implement a hashtable of which 'struct event *' structures
 * have been setup or added.  We don't want to trust the content of the struct
 * event itself, since we're trying to work through cases where an event gets
 * clobbered or freed.  Instead, we keep a hashtable indexed by the pointer. */

struct event_debug_entry {
    HT_ENTRY(event_debug_entry) node;
    const struct event *ptr;
    unsigned added : 1;
};
static inline unsigned
hash_debug_entry(const struct event_debug_entry *e)
{
    /* We need to do this silliness to convince compilers that we
     * honestly mean to cast e->ptr to an integer, and discard any
     * part of it that doesn't fit in an unsigned. */
    unsigned u = (unsigned) ((ev_uintptr_t) e->ptr);
    /* Our hashtable implementation is pretty sensitive to low bits,
     * and every struct event is over 64 bytes in size, so we can
     * just say >>6. */
    return (u >> 6);
}

static inline int
eq_debug_entry(const struct event_debug_entry *a,
    const struct event_debug_entry *b)
{
    return a->ptr == b->ptr;
}

int event_debug_mode_on_ = 0;
#if !defined(EVENT__DISABLE_THREAD_SUPPORT) && !defined(EVENT__DISABLE_DEBUG_MODE)
/**
 * @brief debug mode variable which is set for any function/structure that
 * needs to be shared across threads (if thread support is enabled).
 *
 * When and if evthreads are initialized, this variable will be evaluated,
 * and if set to something other than zero, this means the evthread setup
 * functions were called out of order.
 *
 * See: "Locks and threading" in the documentation.
 */
int event_debug_created_threadable_ctx_ = 0;
#endif

/* Set if it's too late to enable event_debug_mode. */
static int event_debug_mode_too_late = 0;
#ifndef EVENT__DISABLE_THREAD_SUPPORT
static void *event_debug_map_lock_ = NULL;
#endif
static HT_HEAD(event_debug_map, event_debug_entry) global_debug_map =
    HT_INITIALIZER();

HT_PROTOTYPE(event_debug_map, event_debug_entry, node, hash_debug_entry,
    eq_debug_entry)
HT_GENERATE(event_debug_map, event_debug_entry, node, hash_debug_entry,
    eq_debug_entry, 0.5, mm_malloc, mm_realloc, mm_free)
/* Macro: record that ev is now setup (that is, ready for an add) */
#define event_debug_note_setup_(ev) do { \
    if (event_debug_mode_on_) { \
        struct event_debug_entry *dent,find; \
        EVLOCK_LOCK(event_debug_map_lock_, 0); \
        dent = HT_FIND(event_debug_map, &global_debug_map, &find); \
            dent = mm_malloc(sizeof(*dent)); \
                "Out of memory in debugging code"); \
            HT_INSERT(event_debug_map, &global_debug_map, dent); \
        EVLOCK_UNLOCK(event_debug_map_lock_, 0); \
    event_debug_mode_too_late = 1; \
/* Macro: record that ev is no longer setup */
#define event_debug_note_teardown_(ev) do { \
    if (event_debug_mode_on_) { \
        struct event_debug_entry *dent,find; \
        EVLOCK_LOCK(event_debug_map_lock_, 0); \
        dent = HT_REMOVE(event_debug_map, &global_debug_map, &find); \
        EVLOCK_UNLOCK(event_debug_map_lock_, 0); \
    event_debug_mode_too_late = 1; \
/* Macro: record that ev is now added */
#define event_debug_note_add_(ev) do { \
    if (event_debug_mode_on_) { \
        struct event_debug_entry *dent,find; \
        EVLOCK_LOCK(event_debug_map_lock_, 0); \
        dent = HT_FIND(event_debug_map, &global_debug_map, &find); \
            event_errx(EVENT_ERR_ABORT_, \
                "%s: noting an add on a non-setup event %p" \
                " (events: 0x%x, fd: "EV_SOCK_FMT \
                __func__, (ev), (ev)->ev_events, \
                EV_SOCK_ARG((ev)->ev_fd), (ev)->ev_flags); \
        EVLOCK_UNLOCK(event_debug_map_lock_, 0); \
    event_debug_mode_too_late = 1; \
/* Macro: record that ev is no longer added */
#define event_debug_note_del_(ev) do { \
    if (event_debug_mode_on_) { \
        struct event_debug_entry *dent,find; \
        EVLOCK_LOCK(event_debug_map_lock_, 0); \
        dent = HT_FIND(event_debug_map, &global_debug_map, &find); \
            event_errx(EVENT_ERR_ABORT_, \
                "%s: noting a del on a non-setup event %p" \
                " (events: 0x%x, fd: "EV_SOCK_FMT \
                __func__, (ev), (ev)->ev_events, \
                EV_SOCK_ARG((ev)->ev_fd), (ev)->ev_flags); \
        EVLOCK_UNLOCK(event_debug_map_lock_, 0); \
    event_debug_mode_too_late = 1; \
/* Macro: assert that ev is setup (i.e., okay to add or inspect) */
#define event_debug_assert_is_setup_(ev) do { \
    if (event_debug_mode_on_) { \
        struct event_debug_entry *dent,find; \
        EVLOCK_LOCK(event_debug_map_lock_, 0); \
        dent = HT_FIND(event_debug_map, &global_debug_map, &find); \
            event_errx(EVENT_ERR_ABORT_, \
                "%s called on a non-initialized event %p" \
                " (events: 0x%x, fd: "EV_SOCK_FMT \
                __func__, (ev), (ev)->ev_events, \
                EV_SOCK_ARG((ev)->ev_fd), (ev)->ev_flags); \
        EVLOCK_UNLOCK(event_debug_map_lock_, 0); \
/* Macro: assert that ev is not added (i.e., okay to tear down or set
 * up) */
#define event_debug_assert_not_added_(ev) do { \
    if (event_debug_mode_on_) { \
        struct event_debug_entry *dent,find; \
        EVLOCK_LOCK(event_debug_map_lock_, 0); \
        dent = HT_FIND(event_debug_map, &global_debug_map, &find); \
        if (dent && dent->added) { \
            event_errx(EVENT_ERR_ABORT_, \
                "%s called on an already added event %p" \
                " (events: 0x%x, fd: "EV_SOCK_FMT", " \
                __func__, (ev), (ev)->ev_events, \
                EV_SOCK_ARG((ev)->ev_fd), (ev)->ev_flags); \
        EVLOCK_UNLOCK(event_debug_map_lock_, 0); \
#else
#define event_debug_note_setup_(ev) \
    ((void)0)
#define event_debug_note_teardown_(ev) \
    ((void)0)
#define event_debug_note_add_(ev) \
    ((void)0)
#define event_debug_note_del_(ev) \
    ((void)0)
#define event_debug_assert_is_setup_(ev) \
    ((void)0)
#define event_debug_assert_not_added_(ev) \
    ((void)0)
#endif
#define EVENT_BASE_ASSERT_LOCKED(base) \
    EVLOCK_ASSERT_LOCKED((base)->th_base_lock)

/* How often (in seconds) do we check for changes in wall clock time relative
 * to monotonic time?  Set this to -1 for 'never.' */
#define CLOCK_SYNC_INTERVAL 5

/** Set 'tp' to the current time according to 'base'.  We must hold the lock
 * on 'base'.  If there is a cached time, return it.  Otherwise, use
 * clock_gettime or gettimeofday as appropriate to find out the right time.
 * Return 0 on success, -1 on failure.
 */
static int
gettime(struct event_base *base, struct timeval *tp)
{
    EVENT_BASE_ASSERT_LOCKED(base);

    if (base->tv_cache.tv_sec) {
        *tp = base->tv_cache;
        return (0);
    }

    if (evutil_gettime_monotonic_(&base->monotonic_timer, tp) == -1) {
        return -1;
    }

    if (base->last_updated_clock_diff + CLOCK_SYNC_INTERVAL
        < tp->tv_sec) {
        struct timeval tv;
        evutil_gettimeofday(&tv,NULL);
        evutil_timersub(&tv, tp, &base->tv_clock_diff);
        base->last_updated_clock_diff = tp->tv_sec;
    }

    return 0;
}
int
event_base_gettimeofday_cached(struct event_base *base, struct timeval *tv)
{
    int r;
    if (!base) {
        base = current_base;
        if (!current_base)
            return evutil_gettimeofday(tv, NULL);
    }

    EVBASE_ACQUIRE_LOCK(base, th_base_lock);
    if (base->tv_cache.tv_sec == 0) {
        r = evutil_gettimeofday(tv, NULL);
    } else {
        evutil_timeradd(&base->tv_cache, &base->tv_clock_diff, tv);
        r = 0;
    }

    EVBASE_RELEASE_LOCK(base, th_base_lock);
    return r;
}
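
/*
 * Illustrative usage sketch (not part of libevent): a caller that only needs
 * a coarse "current time" inside a callback can read the cached value instead
 * of paying for a syscall on every call.  'base' and the callback wiring are
 * assumptions for the example; only the public event_base_gettimeofday_cached()
 * API is used.
 *
 *	#include <event2/event.h>
 *
 *	static void on_read(evutil_socket_t fd, short what, void *arg)
 *	{
 *		struct event_base *base = arg;
 *		struct timeval now;
 *		event_base_gettimeofday_cached(base, &now);
 *		// 'now' is roughly the time this round of callbacks started;
 *		// outside the loop it falls back to a real gettimeofday().
 *	}
 */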
/** Make 'base' have no current cached time. */
static void
clear_time_cache(struct event_base *base)
{
    base->tv_cache.tv_sec = 0;
}

/** Replace the cached time in 'base' with the current time. */
static void
update_time_cache(struct event_base *base)
{
    base->tv_cache.tv_sec = 0;
    if (!(base->flags & EVENT_BASE_FLAG_NO_CACHE_TIME))
        gettime(base, &base->tv_cache);
}

int
event_base_update_cache_time(struct event_base *base)
{
    if (!base) {
        base = current_base;
        if (!current_base)
            return -1;
    }

    EVBASE_ACQUIRE_LOCK(base, th_base_lock);
    if (base->running_loop)
        update_time_cache(base);
    EVBASE_RELEASE_LOCK(base, th_base_lock);
    return 0;
}
static inline struct event *
event_callback_to_event(struct event_callback *evcb)
{
    EVUTIL_ASSERT((evcb->evcb_flags & EVLIST_INIT));
    return EVUTIL_UPCAST(evcb, struct event, ev_evcallback);
}

static inline struct event_callback *
event_to_event_callback(struct event *ev)
{
    return &ev->ev_evcallback;
}
struct event_base *
event_init(void)
{
    struct event_base *base = event_base_new_with_config(NULL);

    if (base == NULL) {
        event_errx(1, "%s: Unable to construct event_base", __func__);
        return NULL;
    }

    current_base = base;

    return (base);
}

struct event_base *
event_base_new(void)
{
    struct event_base *base = NULL;
    struct event_config *cfg = event_config_new();
    if (cfg) {
        base = event_base_new_with_config(cfg);
        event_config_free(cfg);
    }
    return base;
}
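
/*
 * Illustrative usage sketch (not part of libevent): event_base_new() above is
 * just the default-config path.  An application that needs particular backend
 * features or flags builds an event_config itself; everything shown is public
 * API from <event2/event.h>.
 *
 *	struct event_config *cfg = event_config_new();
 *	event_config_require_features(cfg, EV_FEATURE_O1);
 *	event_config_set_flag(cfg, EVENT_BASE_FLAG_NO_CACHE_TIME);
 *	struct event_base *base = event_base_new_with_config(cfg);
 *	event_config_free(cfg);
 *	if (!base) {
 *		// no installed backend satisfied the requirements
 *	}
 */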
/** Return true iff 'method' is the name of a method that 'cfg' tells us to
 * avoid. */
static int
event_config_is_avoided_method(const struct event_config *cfg,
    const char *method)
{
    struct event_config_entry *entry;

    TAILQ_FOREACH(entry, &cfg->entries, next) {
        if (entry->avoid_method != NULL &&
            strcmp(entry->avoid_method, method) == 0)
            return (1);
    }

    return (0);
}

/** Return true iff 'method' is disabled according to the environment. */
static int
event_is_method_disabled(const char *name)
{
    char environment[64];
    int i;

    evutil_snprintf(environment, sizeof(environment), "EVENT_NO%s", name);
    for (i = 8; environment[i] != '\0'; ++i)
        environment[i] = EVUTIL_TOUPPER_(environment[i]);
    /* Note that evutil_getenv_() ignores the environment entirely if
     * we're setuid */
    return (evutil_getenv_(environment) != NULL);
}
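
/*
 * Illustrative sketch (not part of libevent): the variable checked above is
 * "EVENT_NO" + the upper-cased method name, so a backend can be excluded
 * either from the environment or per-config.  setenv() is POSIX; the
 * environment is ignored for setuid binaries and for bases created with
 * EVENT_BASE_FLAG_IGNORE_ENV.
 *
 *	#include <stdlib.h>
 *	#include <event2/event.h>
 *
 *	setenv("EVENT_NOEPOLL", "1", 1);            // skip epoll via env
 *	struct event_base *b1 = event_base_new();
 *
 *	struct event_config *cfg = event_config_new();
 *	event_config_avoid_method(cfg, "epoll");    // same effect, via config
 *	struct event_base *b2 = event_base_new_with_config(cfg);
 *	event_config_free(cfg);
 */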
int
event_base_get_features(const struct event_base *base)
{
    return base->evsel->features;
}

void
event_enable_debug_mode(void)
{
#ifndef EVENT__DISABLE_DEBUG_MODE
    if (event_debug_mode_on_)
        event_errx(1, "%s was called twice!", __func__);
    if (event_debug_mode_too_late)
        event_errx(1, "%s must be called *before* creating any events "
            "or event_bases",__func__);

    event_debug_mode_on_ = 1;

    HT_INIT(event_debug_map, &global_debug_map);
#endif
}
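
/*
 * Illustrative usage sketch (not part of libevent): because of the
 * event_debug_mode_too_late check above, debug mode has to be switched on
 * before the first event or event_base is created.
 *
 *	#include <event2/event.h>
 *
 *	int main(void)
 *	{
 *		event_enable_debug_mode();              // must come first
 *		struct event_base *base = event_base_new();
 *		// ... misuse such as re-assigning an added event is now
 *		// caught by the event_debug_* checks in this file ...
 *		event_base_free(base);
 *		return 0;
 *	}
 */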
void
event_disable_debug_mode(void)
{
#ifndef EVENT__DISABLE_DEBUG_MODE
    struct event_debug_entry **ent, *victim;

    EVLOCK_LOCK(event_debug_map_lock_, 0);
    for (ent = HT_START(event_debug_map, &global_debug_map); ent; ) {
        victim = *ent;
        ent = HT_NEXT_RMV(event_debug_map, &global_debug_map, ent);
        mm_free(victim);
    }
    HT_CLEAR(event_debug_map, &global_debug_map);
    EVLOCK_UNLOCK(event_debug_map_lock_, 0);

    event_debug_mode_on_ = 0;
#endif
}
565 event_base_new_with_config(const struct event_config
*cfg
)
568 struct event_base
*base
;
569 int should_check_environment
;
571 #ifndef EVENT__DISABLE_DEBUG_MODE
572 event_debug_mode_too_late
= 1;
575 if ((base
= mm_calloc(1, sizeof(struct event_base
))) == NULL
) {
576 event_warn("%s: calloc", __func__
);
581 base
->flags
= cfg
->flags
;
583 should_check_environment
=
584 !(cfg
&& (cfg
->flags
& EVENT_BASE_FLAG_IGNORE_ENV
));
589 cfg
&& (cfg
->flags
& EVENT_BASE_FLAG_PRECISE_TIMER
);
591 if (should_check_environment
&& !precise_time
) {
592 precise_time
= evutil_getenv_("EVENT_PRECISE_TIMER") != NULL
;
593 base
->flags
|= EVENT_BASE_FLAG_PRECISE_TIMER
;
595 flags
= precise_time
? EV_MONOT_PRECISE
: 0;
596 evutil_configure_monotonic_time_(&base
->monotonic_timer
, flags
);
601 min_heap_ctor_(&base
->timeheap
);
603 base
->sig
.ev_signal_pair
[0] = -1;
604 base
->sig
.ev_signal_pair
[1] = -1;
605 base
->th_notify_fd
[0] = -1;
606 base
->th_notify_fd
[1] = -1;
608 TAILQ_INIT(&base
->active_later_queue
);
610 evmap_io_initmap_(&base
->io
);
611 evmap_signal_initmap_(&base
->sigmap
);
612 event_changelist_init_(&base
->changelist
);
617 memcpy(&base
->max_dispatch_time
,
618 &cfg
->max_dispatch_interval
, sizeof(struct timeval
));
619 base
->limit_callbacks_after_prio
=
620 cfg
->limit_callbacks_after_prio
;
622 base
->max_dispatch_time
.tv_sec
= -1;
623 base
->limit_callbacks_after_prio
= 1;
625 if (cfg
&& cfg
->max_dispatch_callbacks
>= 0) {
626 base
->max_dispatch_callbacks
= cfg
->max_dispatch_callbacks
;
628 base
->max_dispatch_callbacks
= INT_MAX
;
630 if (base
->max_dispatch_callbacks
== INT_MAX
&&
631 base
->max_dispatch_time
.tv_sec
== -1)
632 base
->limit_callbacks_after_prio
= INT_MAX
;
634 for (i
= 0; eventops
[i
] && !base
->evbase
; i
++) {
636 /* determine if this backend should be avoided */
637 if (event_config_is_avoided_method(cfg
,
640 if ((eventops
[i
]->features
& cfg
->require_features
)
641 != cfg
->require_features
)
645 /* also obey the environment variables */
646 if (should_check_environment
&&
647 event_is_method_disabled(eventops
[i
]->name
))
650 base
->evsel
= eventops
[i
];
652 base
->evbase
= base
->evsel
->init(base
);
655 if (base
->evbase
== NULL
) {
656 event_warnx("%s: no event mechanism available",
659 event_base_free(base
);
663 if (evutil_getenv_("EVENT_SHOW_METHOD"))
664 event_msgx("libevent using: %s", base
->evsel
->name
);
666 /* allocate a single active event queue */
667 if (event_base_priority_init(base
, 1) < 0) {
668 event_base_free(base
);
672 /* prepare for threading */
674 #if !defined(EVENT__DISABLE_THREAD_SUPPORT) && !defined(EVENT__DISABLE_DEBUG_MODE)
675 event_debug_created_threadable_ctx_
= 1;
678 #ifndef EVENT__DISABLE_THREAD_SUPPORT
679 if (EVTHREAD_LOCKING_ENABLED() &&
680 (!cfg
|| !(cfg
->flags
& EVENT_BASE_FLAG_NOLOCK
))) {
682 EVTHREAD_ALLOC_LOCK(base
->th_base_lock
, 0);
683 EVTHREAD_ALLOC_COND(base
->current_event_cond
);
684 r
= evthread_make_base_notifiable(base
);
686 event_warnx("%s: Unable to make base notifiable.", __func__
);
687 event_base_free(base
);
694 if (cfg
&& (cfg
->flags
& EVENT_BASE_FLAG_STARTUP_IOCP
))
695 event_base_start_iocp_(base
, cfg
->n_cpus_hint
);
702 event_base_start_iocp_(struct event_base
*base
, int n_cpus
)
707 base
->iocp
= event_iocp_port_launch_(n_cpus
);
709 event_warnx("%s: Couldn't launch IOCP", __func__
);
719 event_base_stop_iocp_(struct event_base
*base
)
726 rv
= event_iocp_shutdown_(base
->iocp
, -1);
727 EVUTIL_ASSERT(rv
>= 0);
733 event_base_cancel_single_callback_(struct event_base
*base
,
734 struct event_callback
*evcb
,
739 if (evcb
->evcb_flags
& EVLIST_INIT
) {
740 struct event
*ev
= event_callback_to_event(evcb
);
741 if (!(ev
->ev_flags
& EVLIST_INTERNAL
)) {
742 event_del_(ev
, EVENT_DEL_EVEN_IF_FINALIZING
);
746 EVBASE_ACQUIRE_LOCK(base
, th_base_lock
);
747 event_callback_cancel_nolock_(base
, evcb
, 1);
748 EVBASE_RELEASE_LOCK(base
, th_base_lock
);
752 if (run_finalizers
&& (evcb
->evcb_flags
& EVLIST_FINALIZING
)) {
753 switch (evcb
->evcb_closure
) {
754 case EV_CLOSURE_EVENT_FINALIZE
:
755 case EV_CLOSURE_EVENT_FINALIZE_FREE
: {
756 struct event
*ev
= event_callback_to_event(evcb
);
757 ev
->ev_evcallback
.evcb_cb_union
.evcb_evfinalize(ev
, ev
->ev_arg
);
758 if (evcb
->evcb_closure
== EV_CLOSURE_EVENT_FINALIZE_FREE
)
762 case EV_CLOSURE_CB_FINALIZE
:
763 evcb
->evcb_cb_union
.evcb_cbfinalize(evcb
, evcb
->evcb_arg
);
772 static int event_base_free_queues_(struct event_base
*base
, int run_finalizers
)
776 for (i
= 0; i
< base
->nactivequeues
; ++i
) {
777 struct event_callback
*evcb
, *next
;
778 for (evcb
= TAILQ_FIRST(&base
->activequeues
[i
]); evcb
; ) {
779 next
= TAILQ_NEXT(evcb
, evcb_active_next
);
780 deleted
+= event_base_cancel_single_callback_(base
, evcb
, run_finalizers
);
786 struct event_callback
*evcb
;
787 while ((evcb
= TAILQ_FIRST(&base
->active_later_queue
))) {
788 deleted
+= event_base_cancel_single_callback_(base
, evcb
, run_finalizers
);
796 event_base_free_(struct event_base
*base
, int run_finalizers
)
800 /* XXXX grab the lock? If there is contention when one thread frees
801 * the base, then the contending thread will be very sad soon. */
803 /* event_base_free(NULL) is how to free the current_base if we
804 * made it with event_init and forgot to hold a reference to it. */
805 if (base
== NULL
&& current_base
)
807 /* Don't actually free NULL. */
809 event_warnx("%s: no base to free", __func__
);
812 /* XXX(niels) - check for internal events first */
815 event_base_stop_iocp_(base
);
818 /* threading fds if we have them */
819 if (base
->th_notify_fd
[0] != -1) {
820 event_del(&base
->th_notify
);
821 EVUTIL_CLOSESOCKET(base
->th_notify_fd
[0]);
822 if (base
->th_notify_fd
[1] != -1)
823 EVUTIL_CLOSESOCKET(base
->th_notify_fd
[1]);
824 base
->th_notify_fd
[0] = -1;
825 base
->th_notify_fd
[1] = -1;
826 event_debug_unassign(&base
->th_notify
);
829 /* Delete all non-internal events. */
830 evmap_delete_all_(base
);
832 while ((ev
= min_heap_top_(&base
->timeheap
)) != NULL
) {
836 for (i
= 0; i
< base
->n_common_timeouts
; ++i
) {
837 struct common_timeout_list
*ctl
=
838 base
->common_timeout_queues
[i
];
839 event_del(&ctl
->timeout_event
); /* Internal; doesn't count */
840 event_debug_unassign(&ctl
->timeout_event
);
841 for (ev
= TAILQ_FIRST(&ctl
->events
); ev
; ) {
842 struct event
*next
= TAILQ_NEXT(ev
,
843 ev_timeout_pos
.ev_next_with_common_timeout
);
844 if (!(ev
->ev_flags
& EVLIST_INTERNAL
)) {
852 if (base
->common_timeout_queues
)
853 mm_free(base
->common_timeout_queues
);
	/* A finalizer can register yet another finalizer from within its own
	 * callback; if that new finalizer ends up in active_later_queue it is
	 * then moved to activequeues, and we would have events in
	 * activequeues after this function returns, which is not what we want
	 * (we even have an assertion for this).
	 *
	 * A simple case is a bufferevent with an underlying bufferevent
	 * (i.e. filters).
	 */
	int i = event_base_free_queues_(base, run_finalizers);
872 event_debug(("%s: %d events were still set in base",
873 __func__
, n_deleted
));
875 while (LIST_FIRST(&base
->once_events
)) {
876 struct event_once
*eonce
= LIST_FIRST(&base
->once_events
);
877 LIST_REMOVE(eonce
, next_once
);
881 if (base
->evsel
!= NULL
&& base
->evsel
->dealloc
!= NULL
)
882 base
->evsel
->dealloc(base
);
884 for (i
= 0; i
< base
->nactivequeues
; ++i
)
885 EVUTIL_ASSERT(TAILQ_EMPTY(&base
->activequeues
[i
]));
887 EVUTIL_ASSERT(min_heap_empty_(&base
->timeheap
));
888 min_heap_dtor_(&base
->timeheap
);
890 mm_free(base
->activequeues
);
892 evmap_io_clear_(&base
->io
);
893 evmap_signal_clear_(&base
->sigmap
);
894 event_changelist_freemem_(&base
->changelist
);
896 EVTHREAD_FREE_LOCK(base
->th_base_lock
, 0);
897 EVTHREAD_FREE_COND(base
->current_event_cond
);
899 /* If we're freeing current_base, there won't be a current_base. */
900 if (base
== current_base
)
906 event_base_free_nofinalize(struct event_base
*base
)
908 event_base_free_(base
, 0);
912 event_base_free(struct event_base
*base
)
914 event_base_free_(base
, 1);
917 /* Fake eventop; used to disable the backend temporarily inside event_reinit
918 * so that we can call event_del() on an event without telling the backend.
921 nil_backend_del(struct event_base
*b
, evutil_socket_t fd
, short old
,
922 short events
, void *fdinfo
)
926 const struct eventop nil_eventop
= {
928 NULL
, /* init: unused. */
929 NULL
, /* add: unused. */
930 nil_backend_del
, /* del: used, so needs to be killed. */
931 NULL
, /* dispatch: unused. */
932 NULL
, /* dealloc: unused. */
936 /* reinitialize the event base after a fork */
938 event_reinit(struct event_base
*base
)
940 const struct eventop
*evsel
;
942 int was_notifiable
= 0;
943 int had_signal_added
= 0;
945 EVBASE_ACQUIRE_LOCK(base
, th_base_lock
);
	/* check if this event mechanism requires reinit on the backend */
	if (evsel->need_reinit) {
		/* We're going to call event_del() on our notify events (the
		 * ones that tell about signals and wakeup events).  But we
		 * don't actually want to tell the backend to change its
		 * state, since it might still share some resource (a kqueue,
		 * an epoll fd) with the parent process, and we don't want to
		 * delete the fds from _that_ backend, so we temporarily stub
		 * out the evsel with a replacement. */
		base->evsel = &nil_eventop;
	}
962 /* We need to re-create a new signal-notification fd and a new
963 * thread-notification fd. Otherwise, we'll still share those with
964 * the parent process, which would make any notification sent to them
965 * get received by one or both of the event loops, more or less at
968 if (base
->sig
.ev_signal_added
) {
969 event_del_nolock_(&base
->sig
.ev_signal
, EVENT_DEL_AUTOBLOCK
);
970 event_debug_unassign(&base
->sig
.ev_signal
);
971 memset(&base
->sig
.ev_signal
, 0, sizeof(base
->sig
.ev_signal
));
972 if (base
->sig
.ev_signal_pair
[0] != -1)
973 EVUTIL_CLOSESOCKET(base
->sig
.ev_signal_pair
[0]);
974 if (base
->sig
.ev_signal_pair
[1] != -1)
975 EVUTIL_CLOSESOCKET(base
->sig
.ev_signal_pair
[1]);
976 had_signal_added
= 1;
977 base
->sig
.ev_signal_added
= 0;
979 if (base
->th_notify_fn
!= NULL
) {
981 base
->th_notify_fn
= NULL
;
983 if (base
->th_notify_fd
[0] != -1) {
984 event_del_nolock_(&base
->th_notify
, EVENT_DEL_AUTOBLOCK
);
985 EVUTIL_CLOSESOCKET(base
->th_notify_fd
[0]);
986 if (base
->th_notify_fd
[1] != -1)
987 EVUTIL_CLOSESOCKET(base
->th_notify_fd
[1]);
988 base
->th_notify_fd
[0] = -1;
989 base
->th_notify_fd
[1] = -1;
990 event_debug_unassign(&base
->th_notify
);
993 /* Replace the original evsel. */
996 if (evsel
->need_reinit
) {
997 /* Reconstruct the backend through brute-force, so that we do
998 * not share any structures with the parent process. For some
999 * backends, this is necessary: epoll and kqueue, for
1000 * instance, have events associated with a kernel
1001 * structure. If didn't reinitialize, we'd share that
1002 * structure with the parent process, and any changes made by
1003 * the parent would affect our backend's behavior (and vice
1006 if (base
->evsel
->dealloc
!= NULL
)
1007 base
->evsel
->dealloc(base
);
1008 base
->evbase
= evsel
->init(base
);
1009 if (base
->evbase
== NULL
) {
1011 "%s: could not reinitialize event mechanism",
1017 /* Empty out the changelist (if any): we are starting from a
1019 event_changelist_freemem_(&base
->changelist
);
1021 /* Tell the event maps to re-inform the backend about all
1022 * pending events. This will make the signal notification
1023 * event get re-created if necessary. */
1024 if (evmap_reinit_(base
) < 0)
1027 if (had_signal_added
)
1028 res
= evsig_init_(base
);
	/* If we were notifiable before, and nothing just exploded, become
	 * notifiable again. */
	if (was_notifiable && res == 0)
		res = evthread_make_base_notifiable_nolock_(base);

	EVBASE_RELEASE_LOCK(base, th_base_lock);
	return (res);
}
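
/*
 * Illustrative usage sketch (not part of libevent): event_reinit() is meant
 * to run in the child after fork(), before the child touches the base again.
 * 'base' is assumed to have been created by the parent.
 *
 *	#include <unistd.h>
 *	#include <event2/event.h>
 *
 *	pid_t pid = fork();
 *	if (pid == 0) {
 *		if (event_reinit(base) == -1) {
 *			// backend could not be rebuilt; do not reuse 'base'
 *		}
 *		// the child may now add and dispatch events on 'base'
 *	}
 */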
/* Get the monotonic time for this event_base's timer */
int
event_gettime_monotonic(struct event_base *base, struct timeval *tv)
{
	int rv = -1;

	if (base && tv) {
		EVBASE_ACQUIRE_LOCK(base, th_base_lock);
		rv = evutil_gettime_monotonic_(&(base->monotonic_timer), tv);
		EVBASE_RELEASE_LOCK(base, th_base_lock);
	}

	return rv;
}
1057 event_get_supported_methods(void)
1059 static const char **methods
= NULL
;
1060 const struct eventop
**method
;
1064 /* count all methods */
1065 for (method
= &eventops
[0]; *method
!= NULL
; ++method
) {
1069 /* allocate one more than we need for the NULL pointer */
1070 tmp
= mm_calloc((i
+ 1), sizeof(char *));
1074 /* populate the array with the supported methods */
1075 for (k
= 0, i
= 0; eventops
[k
] != NULL
; ++k
) {
1076 tmp
[i
++] = eventops
[k
]->name
;
1080 if (methods
!= NULL
)
1081 mm_free((char**)methods
);
1088 struct event_config
*
1089 event_config_new(void)
1091 struct event_config
*cfg
= mm_calloc(1, sizeof(*cfg
));
1096 TAILQ_INIT(&cfg
->entries
);
1097 cfg
->max_dispatch_interval
.tv_sec
= -1;
1098 cfg
->max_dispatch_callbacks
= INT_MAX
;
1099 cfg
->limit_callbacks_after_prio
= 1;
1105 event_config_entry_free(struct event_config_entry
*entry
)
1107 if (entry
->avoid_method
!= NULL
)
1108 mm_free((char *)entry
->avoid_method
);
1113 event_config_free(struct event_config
*cfg
)
1115 struct event_config_entry
*entry
;
1117 while ((entry
= TAILQ_FIRST(&cfg
->entries
)) != NULL
) {
1118 TAILQ_REMOVE(&cfg
->entries
, entry
, next
);
1119 event_config_entry_free(entry
);
1125 event_config_set_flag(struct event_config
*cfg
, int flag
)
1134 event_config_avoid_method(struct event_config
*cfg
, const char *method
)
1136 struct event_config_entry
*entry
= mm_malloc(sizeof(*entry
));
1140 if ((entry
->avoid_method
= mm_strdup(method
)) == NULL
) {
1145 TAILQ_INSERT_TAIL(&cfg
->entries
, entry
, next
);
1151 event_config_require_features(struct event_config
*cfg
,
1156 cfg
->require_features
= features
;
1161 event_config_set_num_cpus_hint(struct event_config
*cfg
, int cpus
)
1165 cfg
->n_cpus_hint
= cpus
;
1170 event_config_set_max_dispatch_interval(struct event_config
*cfg
,
1171 const struct timeval
*max_interval
, int max_callbacks
, int min_priority
)
1174 memcpy(&cfg
->max_dispatch_interval
, max_interval
,
1175 sizeof(struct timeval
));
1177 cfg
->max_dispatch_interval
.tv_sec
= -1;
1178 cfg
->max_dispatch_callbacks
=
1179 max_callbacks
>= 0 ? max_callbacks
: INT_MAX
;
1180 if (min_priority
< 0)
1182 cfg
->limit_callbacks_after_prio
= min_priority
;
1187 event_priority_init(int npriorities
)
1189 return event_base_priority_init(current_base
, npriorities
);
1193 event_base_priority_init(struct event_base
*base
, int npriorities
)
1198 EVBASE_ACQUIRE_LOCK(base
, th_base_lock
);
1200 if (N_ACTIVE_CALLBACKS(base
) || npriorities
< 1
1201 || npriorities
>= EVENT_MAX_PRIORITIES
)
1204 if (npriorities
== base
->nactivequeues
)
1207 if (base
->nactivequeues
) {
1208 mm_free(base
->activequeues
);
1209 base
->nactivequeues
= 0;
1212 /* Allocate our priority queues */
1213 base
->activequeues
= (struct evcallback_list
*)
1214 mm_calloc(npriorities
, sizeof(struct evcallback_list
));
1215 if (base
->activequeues
== NULL
) {
1216 event_warn("%s: calloc", __func__
);
1219 base
->nactivequeues
= npriorities
;
1221 for (i
= 0; i
< base
->nactivequeues
; ++i
) {
1222 TAILQ_INIT(&base
->activequeues
[i
]);
1228 EVBASE_RELEASE_LOCK(base
, th_base_lock
);
1233 event_base_get_npriorities(struct event_base
*base
)
1238 base
= current_base
;
1240 EVBASE_ACQUIRE_LOCK(base
, th_base_lock
);
1241 n
= base
->nactivequeues
;
1242 EVBASE_RELEASE_LOCK(base
, th_base_lock
);
1247 event_base_get_num_events(struct event_base
*base
, unsigned int type
)
1251 EVBASE_ACQUIRE_LOCK(base
, th_base_lock
);
1253 if (type
& EVENT_BASE_COUNT_ACTIVE
)
1254 r
+= base
->event_count_active
;
1256 if (type
& EVENT_BASE_COUNT_VIRTUAL
)
1257 r
+= base
->virtual_event_count
;
1259 if (type
& EVENT_BASE_COUNT_ADDED
)
1260 r
+= base
->event_count
;
1262 EVBASE_RELEASE_LOCK(base
, th_base_lock
);
1268 event_base_get_max_events(struct event_base
*base
, unsigned int type
, int clear
)
1272 EVBASE_ACQUIRE_LOCK(base
, th_base_lock
);
1274 if (type
& EVENT_BASE_COUNT_ACTIVE
) {
1275 r
+= base
->event_count_active_max
;
1277 base
->event_count_active_max
= 0;
1280 if (type
& EVENT_BASE_COUNT_VIRTUAL
) {
1281 r
+= base
->virtual_event_count_max
;
1283 base
->virtual_event_count_max
= 0;
1286 if (type
& EVENT_BASE_COUNT_ADDED
) {
1287 r
+= base
->event_count_max
;
1289 base
->event_count_max
= 0;
1292 EVBASE_RELEASE_LOCK(base
, th_base_lock
);
1297 /* Returns true iff we're currently watching any events. */
1299 event_haveevents(struct event_base
*base
)
1301 /* Caller must hold th_base_lock */
1302 return (base
->virtual_event_count
> 0 || base
->event_count
> 0);
1305 /* "closure" function called when processing active signal events */
1307 event_signal_closure(struct event_base
*base
, struct event
*ev
)
1312 /* Allows deletes to work */
1313 ncalls
= ev
->ev_ncalls
;
1315 ev
->ev_pncalls
= &ncalls
;
1316 EVBASE_RELEASE_LOCK(base
, th_base_lock
);
1319 ev
->ev_ncalls
= ncalls
;
1321 ev
->ev_pncalls
= NULL
;
1322 (*ev
->ev_callback
)(ev
->ev_fd
, ev
->ev_res
, ev
->ev_arg
);
1324 EVBASE_ACQUIRE_LOCK(base
, th_base_lock
);
1325 should_break
= base
->event_break
;
1326 EVBASE_RELEASE_LOCK(base
, th_base_lock
);
1330 ev
->ev_pncalls
= NULL
;
/* Common timeouts are special timeouts that are handled as queues rather than
 * in the minheap.  This is more efficient than the minheap if we happen to
 * know that we're going to get several thousands of timeout events all with
 * the same timeout value.
 *
 * Since all our timeout handling code assumes timevals can be copied,
 * assigned, etc, we can't use "magic pointer" to encode these common
 * timeouts.  Searching through a list to see if every timeout is common could
 * also get inefficient.  Instead, we take advantage of the fact that tv_usec
 * is 32 bits long, but only uses 20 of those bits (since it can never be over
 * 999999.)  We use the top bits to encode 4 bits of magic number, and 8 bits
 * of index into the event_base's array of common timeouts.
 */

#define MICROSECONDS_MASK       COMMON_TIMEOUT_MICROSECONDS_MASK
#define COMMON_TIMEOUT_IDX_MASK 0x0ff00000
#define COMMON_TIMEOUT_IDX_SHIFT 20
#define COMMON_TIMEOUT_MASK     0xf0000000
#define COMMON_TIMEOUT_MAGIC    0x50000000

#define COMMON_TIMEOUT_IDX(tv) \
	(((tv)->tv_usec & COMMON_TIMEOUT_IDX_MASK)>>COMMON_TIMEOUT_IDX_SHIFT)
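
/*
 * Illustrative usage sketch (not part of libevent): an application with many
 * timers sharing one duration opts into the queue-based path by adding events
 * with the timeval that event_base_init_common_timeout() hands back.  'base'
 * and 'timer_ev' are assumptions for the example.
 *
 *	struct timeval ten_sec = { 10, 0 };
 *	const struct timeval *common =
 *	    event_base_init_common_timeout(base, &ten_sec);
 *	// 'common' carries the magic/index bits in tv_usec; pass it (not
 *	// ten_sec) to event_add() for each of the many identical timers.
 *	event_add(timer_ev, common);
 */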
/** Return true iff 'tv' is a common timeout in 'base' */
static inline int
is_common_timeout(const struct timeval *tv,
    const struct event_base *base)
{
	int idx;
	if ((tv->tv_usec & COMMON_TIMEOUT_MASK) != COMMON_TIMEOUT_MAGIC)
		return 0;
	idx = COMMON_TIMEOUT_IDX(tv);
	return idx < base->n_common_timeouts;
}
1371 /* True iff tv1 and tv2 have the same common-timeout index, or if neither
1372 * one is a common timeout. */
1374 is_same_common_timeout(const struct timeval
*tv1
, const struct timeval
*tv2
)
1376 return (tv1
->tv_usec
& ~MICROSECONDS_MASK
) ==
1377 (tv2
->tv_usec
& ~MICROSECONDS_MASK
);
1380 /** Requires that 'tv' is a common timeout. Return the corresponding
1381 * common_timeout_list. */
1382 static inline struct common_timeout_list
*
1383 get_common_timeout_list(struct event_base
*base
, const struct timeval
*tv
)
1385 return base
->common_timeout_queues
[COMMON_TIMEOUT_IDX(tv
)];
1390 common_timeout_ok(const struct timeval
*tv
,
1391 struct event_base
*base
)
1393 const struct timeval
*expect
=
1394 &get_common_timeout_list(base
, tv
)->duration
;
1395 return tv
->tv_sec
== expect
->tv_sec
&&
1396 tv
->tv_usec
== expect
->tv_usec
;
1400 /* Add the timeout for the first event in given common timeout list to the
1401 * event_base's minheap. */
1403 common_timeout_schedule(struct common_timeout_list
*ctl
,
1404 const struct timeval
*now
, struct event
*head
)
1406 struct timeval timeout
= head
->ev_timeout
;
1407 timeout
.tv_usec
&= MICROSECONDS_MASK
;
1408 event_add_nolock_(&ctl
->timeout_event
, &timeout
, 1);
1411 /* Callback: invoked when the timeout for a common timeout queue triggers.
1412 * This means that (at least) the first event in that queue should be run,
1413 * and the timeout should be rescheduled if there are more events. */
1415 common_timeout_callback(evutil_socket_t fd
, short what
, void *arg
)
1418 struct common_timeout_list
*ctl
= arg
;
1419 struct event_base
*base
= ctl
->base
;
1420 struct event
*ev
= NULL
;
1421 EVBASE_ACQUIRE_LOCK(base
, th_base_lock
);
1422 gettime(base
, &now
);
1424 ev
= TAILQ_FIRST(&ctl
->events
);
1425 if (!ev
|| ev
->ev_timeout
.tv_sec
> now
.tv_sec
||
1426 (ev
->ev_timeout
.tv_sec
== now
.tv_sec
&&
1427 (ev
->ev_timeout
.tv_usec
&MICROSECONDS_MASK
) > now
.tv_usec
))
1429 event_del_nolock_(ev
, EVENT_DEL_NOBLOCK
);
1430 event_active_nolock_(ev
, EV_TIMEOUT
, 1);
1433 common_timeout_schedule(ctl
, &now
, ev
);
1434 EVBASE_RELEASE_LOCK(base
, th_base_lock
);
1437 #define MAX_COMMON_TIMEOUTS 256
1439 const struct timeval
*
1440 event_base_init_common_timeout(struct event_base
*base
,
1441 const struct timeval
*duration
)
1445 const struct timeval
*result
=NULL
;
1446 struct common_timeout_list
*new_ctl
;
1448 EVBASE_ACQUIRE_LOCK(base
, th_base_lock
);
1449 if (duration
->tv_usec
> 1000000) {
1450 memcpy(&tv
, duration
, sizeof(struct timeval
));
1451 if (is_common_timeout(duration
, base
))
1452 tv
.tv_usec
&= MICROSECONDS_MASK
;
1453 tv
.tv_sec
+= tv
.tv_usec
/ 1000000;
1454 tv
.tv_usec
%= 1000000;
1457 for (i
= 0; i
< base
->n_common_timeouts
; ++i
) {
1458 const struct common_timeout_list
*ctl
=
1459 base
->common_timeout_queues
[i
];
1460 if (duration
->tv_sec
== ctl
->duration
.tv_sec
&&
1461 duration
->tv_usec
==
1462 (ctl
->duration
.tv_usec
& MICROSECONDS_MASK
)) {
1463 EVUTIL_ASSERT(is_common_timeout(&ctl
->duration
, base
));
1464 result
= &ctl
->duration
;
1468 if (base
->n_common_timeouts
== MAX_COMMON_TIMEOUTS
) {
1469 event_warnx("%s: Too many common timeouts already in use; "
1470 "we only support %d per event_base", __func__
,
1471 MAX_COMMON_TIMEOUTS
);
1474 if (base
->n_common_timeouts_allocated
== base
->n_common_timeouts
) {
1475 int n
= base
->n_common_timeouts
< 16 ? 16 :
1476 base
->n_common_timeouts
*2;
1477 struct common_timeout_list
**newqueues
=
1478 mm_realloc(base
->common_timeout_queues
,
1479 n
*sizeof(struct common_timeout_queue
*));
1481 event_warn("%s: realloc",__func__
);
1484 base
->n_common_timeouts_allocated
= n
;
1485 base
->common_timeout_queues
= newqueues
;
1487 new_ctl
= mm_calloc(1, sizeof(struct common_timeout_list
));
1489 event_warn("%s: calloc",__func__
);
1492 TAILQ_INIT(&new_ctl
->events
);
1493 new_ctl
->duration
.tv_sec
= duration
->tv_sec
;
1494 new_ctl
->duration
.tv_usec
=
1495 duration
->tv_usec
| COMMON_TIMEOUT_MAGIC
|
1496 (base
->n_common_timeouts
<< COMMON_TIMEOUT_IDX_SHIFT
);
1497 evtimer_assign(&new_ctl
->timeout_event
, base
,
1498 common_timeout_callback
, new_ctl
);
1499 new_ctl
->timeout_event
.ev_flags
|= EVLIST_INTERNAL
;
1500 event_priority_set(&new_ctl
->timeout_event
, 0);
1501 new_ctl
->base
= base
;
1502 base
->common_timeout_queues
[base
->n_common_timeouts
++] = new_ctl
;
1503 result
= &new_ctl
->duration
;
1507 EVUTIL_ASSERT(is_common_timeout(result
, base
));
1509 EVBASE_RELEASE_LOCK(base
, th_base_lock
);
/* Closure function invoked when we're activating a persistent event. */
static inline void
event_persist_closure(struct event_base *base, struct event *ev)
{
	void (*evcb_callback)(evutil_socket_t, short, void *);

	// Other fields of *ev that must be stored before executing
	evutil_socket_t evcb_fd;
	short evcb_res;
	void *evcb_arg;

	/* reschedule the persistent event if we have a timeout. */
	if (ev->ev_io_timeout.tv_sec || ev->ev_io_timeout.tv_usec) {
		/* If there was a timeout, we want it to run at an interval of
		 * ev_io_timeout after the last time it was _scheduled_ for,
		 * not ev_io_timeout after _now_.  If it fired for another
		 * reason, though, the timeout ought to start ticking _now_. */
		struct timeval run_at, relative_to, delay, now;
		ev_uint32_t usec_mask = 0;
		EVUTIL_ASSERT(is_same_common_timeout(&ev->ev_timeout,
			&ev->ev_io_timeout));
		gettime(base, &now);
		if (is_common_timeout(&ev->ev_timeout, base)) {
			delay = ev->ev_io_timeout;
			usec_mask = delay.tv_usec & ~MICROSECONDS_MASK;
			delay.tv_usec &= MICROSECONDS_MASK;
			if (ev->ev_res & EV_TIMEOUT) {
				relative_to = ev->ev_timeout;
				relative_to.tv_usec &= MICROSECONDS_MASK;
			} else {
				relative_to = now;
			}
		} else {
			delay = ev->ev_io_timeout;
			if (ev->ev_res & EV_TIMEOUT) {
				relative_to = ev->ev_timeout;
			} else {
				relative_to = now;
			}
		}
		evutil_timeradd(&relative_to, &delay, &run_at);
		if (evutil_timercmp(&run_at, &now, <)) {
			/* Looks like we missed at least one invocation due to
			 * a clock jump, not running the event loop for a
			 * while, really slow callbacks, or
			 * something.  Reschedule relative to now. */
			evutil_timeradd(&now, &delay, &run_at);
		}
		run_at.tv_usec |= usec_mask;
		event_add_nolock_(ev, &run_at, 1);
	}

	// Save our callback before we release the lock
	evcb_callback = ev->ev_callback;
	evcb_fd = ev->ev_fd;
	evcb_res = ev->ev_res;
	evcb_arg = ev->ev_arg;

	EVBASE_RELEASE_LOCK(base, th_base_lock);

	// Execute the callback
	(evcb_callback)(evcb_fd, evcb_res, evcb_arg);
}
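
/*
 * Illustrative usage sketch (not part of libevent): the rescheduling above is
 * what makes a persistent timeout tick at a steady interval measured from its
 * previous scheduled expiry rather than from "now" (unless a run was missed,
 * in which case it restarts from now).  'base' is assumed to exist.
 *
 *	#include <event2/event.h>
 *
 *	static void tick(evutil_socket_t fd, short what, void *arg)
 *	{
 *		// runs roughly every 500 ms until event_del() or loop exit
 *	}
 *
 *	struct timeval half_sec = { 0, 500000 };
 *	struct event *ev = event_new(base, -1, EV_PERSIST, tick, NULL);
 *	event_add(ev, &half_sec);    // re-armed automatically by this closure
 */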
/*
   Helper for event_process_active to process all the events in a single queue,
   releasing the lock as we go.  This function requires that the lock be held
   when it's invoked.  Returns -1 if we get a signal or an event_break that
   means we should stop processing any active events now.  Otherwise returns
   the number of non-internal event_callbacks that we processed.
*/
static int
event_process_active_single_queue(struct event_base *base,
    struct evcallback_list *activeq,
    int max_to_process, const struct timeval *endtime)
{
1591 struct event_callback
*evcb
;
1594 EVUTIL_ASSERT(activeq
!= NULL
);
1596 for (evcb
= TAILQ_FIRST(activeq
); evcb
; evcb
= TAILQ_FIRST(activeq
)) {
1597 struct event
*ev
=NULL
;
1598 if (evcb
->evcb_flags
& EVLIST_INIT
) {
1599 ev
= event_callback_to_event(evcb
);
1601 if (ev
->ev_events
& EV_PERSIST
|| ev
->ev_flags
& EVLIST_FINALIZING
)
1602 event_queue_remove_active(base
, evcb
);
1604 event_del_nolock_(ev
, EVENT_DEL_NOBLOCK
);
1606 "event_process_active: event: %p, %s%s%scall %p",
1608 ev
->ev_res
& EV_READ
? "EV_READ " : " ",
1609 ev
->ev_res
& EV_WRITE
? "EV_WRITE " : " ",
1610 ev
->ev_res
& EV_CLOSED
? "EV_CLOSED " : " ",
1613 event_queue_remove_active(base
, evcb
);
1614 event_debug(("event_process_active: event_callback %p, "
1615 "closure %d, call %p",
1616 evcb
, evcb
->evcb_closure
, evcb
->evcb_cb_union
.evcb_callback
));
1619 if (!(evcb
->evcb_flags
& EVLIST_INTERNAL
))
1623 base
->current_event
= evcb
;
1624 #ifndef EVENT__DISABLE_THREAD_SUPPORT
1625 base
->current_event_waiters
= 0;
1628 switch (evcb
->evcb_closure
) {
1629 case EV_CLOSURE_EVENT_SIGNAL
:
1630 EVUTIL_ASSERT(ev
!= NULL
);
1631 event_signal_closure(base
, ev
);
1633 case EV_CLOSURE_EVENT_PERSIST
:
1634 EVUTIL_ASSERT(ev
!= NULL
);
1635 event_persist_closure(base
, ev
);
1637 case EV_CLOSURE_EVENT
: {
1638 void (*evcb_callback
)(evutil_socket_t
, short, void *);
1639 EVUTIL_ASSERT(ev
!= NULL
);
1640 evcb_callback
= *ev
->ev_callback
;
1641 EVBASE_RELEASE_LOCK(base
, th_base_lock
);
1642 evcb_callback(ev
->ev_fd
, ev
->ev_res
, ev
->ev_arg
);
1645 case EV_CLOSURE_CB_SELF
: {
1646 void (*evcb_selfcb
)(struct event_callback
*, void *) = evcb
->evcb_cb_union
.evcb_selfcb
;
1647 EVBASE_RELEASE_LOCK(base
, th_base_lock
);
1648 evcb_selfcb(evcb
, evcb
->evcb_arg
);
1651 case EV_CLOSURE_EVENT_FINALIZE
:
1652 case EV_CLOSURE_EVENT_FINALIZE_FREE
: {
1653 void (*evcb_evfinalize
)(struct event
*, void *);
1654 int evcb_closure
= evcb
->evcb_closure
;
1655 EVUTIL_ASSERT(ev
!= NULL
);
1656 base
->current_event
= NULL
;
1657 evcb_evfinalize
= ev
->ev_evcallback
.evcb_cb_union
.evcb_evfinalize
;
1658 EVUTIL_ASSERT((evcb
->evcb_flags
& EVLIST_FINALIZING
));
1659 EVBASE_RELEASE_LOCK(base
, th_base_lock
);
1660 evcb_evfinalize(ev
, ev
->ev_arg
);
1661 event_debug_note_teardown_(ev
);
1662 if (evcb_closure
== EV_CLOSURE_EVENT_FINALIZE_FREE
)
1666 case EV_CLOSURE_CB_FINALIZE
: {
1667 void (*evcb_cbfinalize
)(struct event_callback
*, void *) = evcb
->evcb_cb_union
.evcb_cbfinalize
;
1668 base
->current_event
= NULL
;
1669 EVUTIL_ASSERT((evcb
->evcb_flags
& EVLIST_FINALIZING
));
1670 EVBASE_RELEASE_LOCK(base
, th_base_lock
);
1671 evcb_cbfinalize(evcb
, evcb
->evcb_arg
);
1678 EVBASE_ACQUIRE_LOCK(base
, th_base_lock
);
1679 base
->current_event
= NULL
;
1680 #ifndef EVENT__DISABLE_THREAD_SUPPORT
1681 if (base
->current_event_waiters
) {
1682 base
->current_event_waiters
= 0;
1683 EVTHREAD_COND_BROADCAST(base
->current_event_cond
);
1687 if (base
->event_break
)
1689 if (count
>= max_to_process
)
1691 if (count
&& endtime
) {
1693 update_time_cache(base
);
1694 gettime(base
, &now
);
1695 if (evutil_timercmp(&now
, endtime
, >=))
1698 if (base
->event_continue
)
/*
 * Active events are stored in priority queues.  Lower priorities are always
 * processed before higher priorities.  Low priority events can starve high
 * priority ones.
 */
static int
event_process_active(struct event_base *base)
{
	/* Caller must hold th_base_lock */
	struct evcallback_list *activeq = NULL;
	int i, c = 0;
	const struct timeval *endtime;
	struct timeval tv;
	const int maxcb = base->max_dispatch_callbacks;
	const int limit_after_prio = base->limit_callbacks_after_prio;
	if (base->max_dispatch_time.tv_sec >= 0) {
		update_time_cache(base);
		gettime(base, &tv);
		evutil_timeradd(&base->max_dispatch_time, &tv, &tv);
		endtime = &tv;
	} else {
		endtime = NULL;
	}

	for (i = 0; i < base->nactivequeues; ++i) {
		if (TAILQ_FIRST(&base->activequeues[i]) != NULL) {
			base->event_running_priority = i;
			activeq = &base->activequeues[i];
			if (i < limit_after_prio)
				c = event_process_active_single_queue(base, activeq,
				    INT_MAX, NULL);
			else
				c = event_process_active_single_queue(base, activeq,
				    maxcb, endtime);
			if (c < 0) {
				goto done;
			} else if (c > 0)
				break; /* Processed a real event; do not
					* consider lower-priority events */
			/* If we get here, all of the events we processed
			 * were internal.  Continue. */
		}
	}

done:
	base->event_running_priority = -1;

	return c;
}
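
/*
 * Illustrative usage sketch (not part of libevent): priorities only matter
 * once the base has more than its single default queue, and a smaller number
 * means "run earlier".  'base', 'fd' and 'cb' are assumptions for the
 * example; event_base_priority_init() must run while no callbacks are active.
 *
 *	#include <event2/event.h>
 *
 *	event_base_priority_init(base, 3);           // queues 0..2
 *	struct event *urgent = event_new(base, fd, EV_READ|EV_PERSIST, cb, NULL);
 *	event_priority_set(urgent, 0);               // drained before 1 and 2
 *	event_add(urgent, NULL);
 */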
/*
 * Wait continuously for events.  We exit only if no events are left.
 */
int
event_dispatch(void)
{
	return (event_loop(0));
}

int
event_base_dispatch(struct event_base *event_base)
{
	return (event_base_loop(event_base, 0));
}

const char *
event_base_get_method(const struct event_base *base)
{
	EVUTIL_ASSERT(base);
	return (base->evsel->name);
}
/** Callback: used to implement event_base_loopexit by telling the event_base
 * that it's time to exit its loop. */
static void
event_loopexit_cb(evutil_socket_t fd, short what, void *arg)
{
	struct event_base *base = arg;
	base->event_gotterm = 1;
}

int
event_loopexit(const struct timeval *tv)
{
	return (event_once(-1, EV_TIMEOUT, event_loopexit_cb,
	    current_base, tv));
}

int
event_base_loopexit(struct event_base *event_base, const struct timeval *tv)
{
	return (event_base_once(event_base, -1, EV_TIMEOUT, event_loopexit_cb,
	    event_base, tv));
}

int
event_loopbreak(void)
{
	return (event_base_loopbreak(current_base));
}

int
event_base_loopbreak(struct event_base *event_base)
{
	int r = -1;
	if (event_base == NULL)
		return (-1);

	EVBASE_ACQUIRE_LOCK(event_base, th_base_lock);
	event_base->event_break = 1;

	if (EVBASE_NEED_NOTIFY(event_base)) {
		r = evthread_notify_base(event_base);
	} else {
		r = (0);
	}
	EVBASE_RELEASE_LOCK(event_base, th_base_lock);
	return r;
}
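
/*
 * Illustrative usage sketch (not part of libevent): loopexit lets the current
 * round of active callbacks finish (optionally after a delay), while
 * loopbreak, above, stops as soon as the callback that is running returns.
 * 'base' is assumed to exist.
 *
 *	#include <event2/event.h>
 *
 *	struct timeval one_sec = { 1, 0 };
 *	event_base_loopexit(base, &one_sec);   // ask the loop to exit in ~1s
 *	event_base_loopbreak(base);            // or: stop right away
 */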
1827 event_base_loopcontinue(struct event_base
*event_base
)
1830 if (event_base
== NULL
)
1833 EVBASE_ACQUIRE_LOCK(event_base
, th_base_lock
);
1834 event_base
->event_continue
= 1;
1836 if (EVBASE_NEED_NOTIFY(event_base
)) {
1837 r
= evthread_notify_base(event_base
);
1841 EVBASE_RELEASE_LOCK(event_base
, th_base_lock
);
1846 event_base_got_break(struct event_base
*event_base
)
1849 EVBASE_ACQUIRE_LOCK(event_base
, th_base_lock
);
1850 res
= event_base
->event_break
;
1851 EVBASE_RELEASE_LOCK(event_base
, th_base_lock
);
1856 event_base_got_exit(struct event_base
*event_base
)
1859 EVBASE_ACQUIRE_LOCK(event_base
, th_base_lock
);
1860 res
= event_base
->event_gotterm
;
1861 EVBASE_RELEASE_LOCK(event_base
, th_base_lock
);
1865 /* not thread safe */
1868 event_loop(int flags
)
1870 return event_base_loop(current_base
, flags
);
1874 event_base_loop(struct event_base
*base
, int flags
)
1876 const struct eventop
*evsel
= base
->evsel
;
1878 struct timeval
*tv_p
;
1879 int res
, done
, retval
= 0;
1881 /* Grab the lock. We will release it inside evsel.dispatch, and again
1882 * as we invoke user callbacks. */
1883 EVBASE_ACQUIRE_LOCK(base
, th_base_lock
);
1885 if (base
->running_loop
) {
1886 event_warnx("%s: reentrant invocation. Only one event_base_loop"
1887 " can run on each event_base at once.", __func__
);
1888 EVBASE_RELEASE_LOCK(base
, th_base_lock
);
1892 base
->running_loop
= 1;
1894 clear_time_cache(base
);
1896 if (base
->sig
.ev_signal_added
&& base
->sig
.ev_n_signals_added
)
1897 evsig_set_base_(base
);
1901 #ifndef EVENT__DISABLE_THREAD_SUPPORT
1902 base
->th_owner_id
= EVTHREAD_GET_ID();
1905 base
->event_gotterm
= base
->event_break
= 0;
1908 base
->event_continue
= 0;
1909 base
->n_deferreds_queued
= 0;
1911 /* Terminate the loop if we have been asked to */
1912 if (base
->event_gotterm
) {
1916 if (base
->event_break
) {
1921 if (!N_ACTIVE_CALLBACKS(base
) && !(flags
& EVLOOP_NONBLOCK
)) {
1922 timeout_next(base
, &tv_p
);
1925 * if we have active events, we just poll new events
1928 evutil_timerclear(&tv
);
1931 /* If we have no events, we just exit */
1932 if (0==(flags
&EVLOOP_NO_EXIT_ON_EMPTY
) &&
1933 !event_haveevents(base
) && !N_ACTIVE_CALLBACKS(base
)) {
1934 event_debug(("%s: no events registered.", __func__
));
1939 event_queue_make_later_events_active(base
);
1941 clear_time_cache(base
);
1943 res
= evsel
->dispatch(base
, tv_p
);
1946 event_debug(("%s: dispatch returned unsuccessfully.",
1952 update_time_cache(base
);
1954 timeout_process(base
);
1956 if (N_ACTIVE_CALLBACKS(base
)) {
1957 int n
= event_process_active(base
);
1958 if ((flags
& EVLOOP_ONCE
)
1959 && N_ACTIVE_CALLBACKS(base
) == 0
1962 } else if (flags
& EVLOOP_NONBLOCK
)
1965 event_debug(("%s: asked to terminate loop.", __func__
));
1968 clear_time_cache(base
);
1969 base
->running_loop
= 0;
1971 EVBASE_RELEASE_LOCK(base
, th_base_lock
);
1976 /* One-time callback to implement event_base_once: invokes the user callback,
1977 * then deletes the allocated storage */
1979 event_once_cb(evutil_socket_t fd
, short events
, void *arg
)
1981 struct event_once
*eonce
= arg
;
1983 (*eonce
->cb
)(fd
, events
, eonce
->arg
);
1984 EVBASE_ACQUIRE_LOCK(eonce
->ev
.ev_base
, th_base_lock
);
1985 LIST_REMOVE(eonce
, next_once
);
1986 EVBASE_RELEASE_LOCK(eonce
->ev
.ev_base
, th_base_lock
);
1987 event_debug_unassign(&eonce
->ev
);
/* not threadsafe, event scheduled once. */
int
event_once(evutil_socket_t fd, short events,
    void (*callback)(evutil_socket_t, short, void *),
    void *arg, const struct timeval *tv)
{
	return event_base_once(current_base, fd, events, callback, arg, tv);
}

/* Schedules an event once */
int
event_base_once(struct event_base *base, evutil_socket_t fd, short events,
    void (*callback)(evutil_socket_t, short, void *),
    void *arg, const struct timeval *tv)
{
	struct event_once *eonce;
	int res = 0;
	int activate = 0;

	/* We cannot support signals that just fire once, or persistent
	 * events. */
	if (events & (EV_SIGNAL|EV_PERSIST))
		return (-1);

	if ((eonce = mm_calloc(1, sizeof(struct event_once))) == NULL)
		return (-1);

	eonce->cb = callback;
	eonce->arg = arg;

	if ((events & (EV_TIMEOUT|EV_SIGNAL|EV_READ|EV_WRITE|EV_CLOSED)) == EV_TIMEOUT) {
		evtimer_assign(&eonce->ev, base, event_once_cb, eonce);

		if (tv == NULL || ! evutil_timerisset(tv)) {
			/* If the event is going to become active immediately,
			 * don't put it on the timeout queue.  This is one
			 * idiom for scheduling a callback, so let's make
			 * it fast (and order-preserving). */
			activate = 1;
		}
	} else if (events & (EV_READ|EV_WRITE|EV_CLOSED)) {
		events &= EV_READ|EV_WRITE|EV_CLOSED;

		event_assign(&eonce->ev, base, fd, events, event_once_cb, eonce);
	} else {
		/* Bad event combination */
		mm_free(eonce);
		return (-1);
	}

	if (res == 0) {
		EVBASE_ACQUIRE_LOCK(base, th_base_lock);
		if (activate)
			event_active_nolock_(&eonce->ev, EV_TIMEOUT, 1);
		else
			res = event_add_nolock_(&eonce->ev, tv, 0);

		if (res != 0) {
			mm_free(eonce);
			return (res);
		} else {
			LIST_INSERT_HEAD(&base->once_events, eonce, next_once);
		}
		EVBASE_RELEASE_LOCK(base, th_base_lock);
	}

	return (0);
}
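
/*
 * Illustrative usage sketch (not part of libevent): event_base_once() is the
 * fire-and-forget form; libevent owns the event_once storage, and entries
 * still pending when the base is torn down are released in event_base_free().
 * 'base' is assumed to exist.
 *
 *	#include <event2/event.h>
 *
 *	static void once_cb(evutil_socket_t fd, short what, void *arg)
 *	{
 *		// called exactly once, then the internal storage is freed
 *	}
 *
 *	struct timeval two_sec = { 2, 0 };
 *	event_base_once(base, -1, EV_TIMEOUT, once_cb, NULL, &two_sec);
 */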
2061 event_assign(struct event
*ev
, struct event_base
*base
, evutil_socket_t fd
, short events
, void (*callback
)(evutil_socket_t
, short, void *), void *arg
)
2064 base
= current_base
;
2065 if (arg
== &event_self_cbarg_ptr_
)
2068 event_debug_assert_not_added_(ev
);
2072 ev
->ev_callback
= callback
;
2075 ev
->ev_events
= events
;
2077 ev
->ev_flags
= EVLIST_INIT
;
2079 ev
->ev_pncalls
= NULL
;
2081 if (events
& EV_SIGNAL
) {
2082 if ((events
& (EV_READ
|EV_WRITE
|EV_CLOSED
)) != 0) {
2083 event_warnx("%s: EV_SIGNAL is not compatible with "
2084 "EV_READ, EV_WRITE or EV_CLOSED", __func__
);
2087 ev
->ev_closure
= EV_CLOSURE_EVENT_SIGNAL
;
2089 if (events
& EV_PERSIST
) {
2090 evutil_timerclear(&ev
->ev_io_timeout
);
2091 ev
->ev_closure
= EV_CLOSURE_EVENT_PERSIST
;
2093 ev
->ev_closure
= EV_CLOSURE_EVENT
;
2097 min_heap_elem_init_(ev
);
2100 /* by default, we put new events into the middle priority */
2101 ev
->ev_pri
= base
->nactivequeues
/ 2;
2104 event_debug_note_setup_(ev
);
2110 event_base_set(struct event_base
*base
, struct event
*ev
)
2112 /* Only innocent events may be assigned to a different base */
2113 if (ev
->ev_flags
!= EVLIST_INIT
)
2116 event_debug_assert_is_setup_(ev
);
2119 ev
->ev_pri
= base
->nactivequeues
/2;
void
event_set(struct event *ev, evutil_socket_t fd, short events,
    void (*callback)(evutil_socket_t, short, void *), void *arg)
{
	int r;
	r = event_assign(ev, current_base, fd, events, callback, arg);
	EVUTIL_ASSERT(r == 0);
}

void *
event_self_cbarg(void)
{
	return &event_self_cbarg_ptr_;
}
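
/*
 * Illustrative usage sketch (not part of libevent): event_self_cbarg() solves
 * the chicken-and-egg problem of passing an event to its own callback;
 * event_assign() swaps the placeholder for the real event pointer.  'base'
 * and 'fd' are assumptions for the example.
 *
 *	#include <event2/event.h>
 *
 *	static void on_readable(evutil_socket_t fd, short what, void *arg)
 *	{
 *		struct event *self = arg;
 *		event_del(self);             // the event can manage itself
 *	}
 *
 *	struct event *ev = event_new(base, fd, EV_READ|EV_PERSIST,
 *	    on_readable, event_self_cbarg());
 *	event_add(ev, NULL);
 */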
2140 event_base_get_running_event(struct event_base
*base
)
2142 struct event
*ev
= NULL
;
2143 EVBASE_ACQUIRE_LOCK(base
, th_base_lock
);
2144 if (EVBASE_IN_THREAD(base
)) {
2145 struct event_callback
*evcb
= base
->current_event
;
2146 if (evcb
->evcb_flags
& EVLIST_INIT
)
2147 ev
= event_callback_to_event(evcb
);
2149 EVBASE_RELEASE_LOCK(base
, th_base_lock
);
2154 event_new(struct event_base
*base
, evutil_socket_t fd
, short events
, void (*cb
)(evutil_socket_t
, short, void *), void *arg
)
2157 ev
= mm_malloc(sizeof(struct event
));
2160 if (event_assign(ev
, base
, fd
, events
, cb
, arg
) < 0) {
void
event_free(struct event *ev)
{
	/* This is disabled, so that events which have been finalized remain
	 * a valid target for event_free(). */
	// event_debug_assert_is_setup_(ev);

	/* make sure that this event won't be coming back to haunt us. */
	event_del(ev);
	event_debug_note_teardown_(ev);
	mm_free(ev);
}
2183 event_debug_unassign(struct event
*ev
)
2185 event_debug_assert_not_added_(ev
);
2186 event_debug_note_teardown_(ev
);
2188 ev
->ev_flags
&= ~EVLIST_INIT
;
2191 #define EVENT_FINALIZE_FREE_ 0x10000
2193 event_finalize_nolock_(struct event_base
*base
, unsigned flags
, struct event
*ev
, event_finalize_callback_fn cb
)
2195 ev_uint8_t closure
= (flags
& EVENT_FINALIZE_FREE_
) ?
2196 EV_CLOSURE_EVENT_FINALIZE_FREE
: EV_CLOSURE_EVENT_FINALIZE
;
2198 event_del_nolock_(ev
, EVENT_DEL_NOBLOCK
);
2199 ev
->ev_closure
= closure
;
2200 ev
->ev_evcallback
.evcb_cb_union
.evcb_evfinalize
= cb
;
2201 event_active_nolock_(ev
, EV_FINALIZE
, 1);
2202 ev
->ev_flags
|= EVLIST_FINALIZING
;
2207 event_finalize_impl_(unsigned flags
, struct event
*ev
, event_finalize_callback_fn cb
)
2210 struct event_base
*base
= ev
->ev_base
;
2211 if (EVUTIL_FAILURE_CHECK(!base
)) {
2212 event_warnx("%s: event has no event_base set.", __func__
);
2216 EVBASE_ACQUIRE_LOCK(base
, th_base_lock
);
2217 r
= event_finalize_nolock_(base
, flags
, ev
, cb
);
2218 EVBASE_RELEASE_LOCK(base
, th_base_lock
);
2223 event_finalize(unsigned flags
, struct event
*ev
, event_finalize_callback_fn cb
)
2225 return event_finalize_impl_(flags
, ev
, cb
);
2229 event_free_finalize(unsigned flags
, struct event
*ev
, event_finalize_callback_fn cb
)
2231 return event_finalize_impl_(flags
|EVENT_FINALIZE_FREE_
, ev
, cb
);
2235 event_callback_finalize_nolock_(struct event_base
*base
, unsigned flags
, struct event_callback
*evcb
, void (*cb
)(struct event_callback
*, void *))
2237 struct event
*ev
= NULL
;
2238 if (evcb
->evcb_flags
& EVLIST_INIT
) {
2239 ev
= event_callback_to_event(evcb
);
2240 event_del_nolock_(ev
, EVENT_DEL_NOBLOCK
);
2242 event_callback_cancel_nolock_(base
, evcb
, 0); /*XXX can this fail?*/
2245 evcb
->evcb_closure
= EV_CLOSURE_CB_FINALIZE
;
2246 evcb
->evcb_cb_union
.evcb_cbfinalize
= cb
;
2247 event_callback_activate_nolock_(base
, evcb
); /* XXX can this really fail?*/
2248 evcb
->evcb_flags
|= EVLIST_FINALIZING
;
2252 event_callback_finalize_(struct event_base
*base
, unsigned flags
, struct event_callback
*evcb
, void (*cb
)(struct event_callback
*, void *))
2254 EVBASE_ACQUIRE_LOCK(base
, th_base_lock
);
2255 event_callback_finalize_nolock_(base
, flags
, evcb
, cb
);
2256 EVBASE_RELEASE_LOCK(base
, th_base_lock
);
/** Internal: Finalize all of the n_cbs callbacks in evcbs.  The provided
 * callback will be invoked on *one of them*, after they have *all* been
 * finalized. */
int
event_callback_finalize_many_(struct event_base *base, int n_cbs, struct event_callback **evcbs, void (*cb)(struct event_callback *, void *))
{
	int n_pending = 0, i;

	if (base == NULL)
		base = current_base;

	EVBASE_ACQUIRE_LOCK(base, th_base_lock);

	event_debug(("%s: %d events finalizing", __func__, n_cbs));

	/* At most one can be currently executing; the rest we just
	 * cancel... But we always make sure that the finalize callback
	 * runs. */
	for (i = 0; i < n_cbs; ++i) {
		struct event_callback *evcb = evcbs[i];
		if (evcb == base->current_event) {
			event_callback_finalize_nolock_(base, 0, evcb, cb);
			++n_pending;
		} else {
			event_callback_cancel_nolock_(base, evcb, 0);
		}
	}

	if (n_pending == 0) {
		/* Just do the first one. */
		event_callback_finalize_nolock_(base, 0, evcbs[0], cb);
	}

	EVBASE_RELEASE_LOCK(base, th_base_lock);
	return 0;
}
/*
 * Sets the priority of an event - if an event is already scheduled
 * changing the priority is going to fail.
 */
int
event_priority_set(struct event *ev, int pri)
{
	event_debug_assert_is_setup_(ev);

	if (ev->ev_flags & EVLIST_ACTIVE)
		return (-1);
	if (pri < 0 || pri >= ev->ev_base->nactivequeues)
		return (-1);

	ev->ev_pri = pri;

	return (0);
}
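/* Illustrative sketch (not part of the library): event_priority_set() is
 * called after event_new()/event_assign() and before the event becomes
 * active; to have more than one priority queue, the base needs
 * event_base_priority_init() first (typically right after event_base_new()).
 * The helper and parameter names below are made up for the example. */
#if 0
static void
example_use_priorities(struct event_base *base, struct event *urgent_ev,
    struct event *bulk_ev)
{
	event_base_priority_init(base, 2);	/* queue 0 = high, 1 = low */
	event_priority_set(urgent_ev, 0);
	event_priority_set(bulk_ev, 1);
	event_add(urgent_ev, NULL);
	event_add(bulk_ev, NULL);
}
#endif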
/*
 * Checks if a specific event is pending or scheduled.
 */
int
event_pending(const struct event *ev, short event, struct timeval *tv)
{
	int flags = 0;

	if (EVUTIL_FAILURE_CHECK(ev->ev_base == NULL)) {
		event_warnx("%s: event has no event_base set.", __func__);
		return 0;
	}

	EVBASE_ACQUIRE_LOCK(ev->ev_base, th_base_lock);
	event_debug_assert_is_setup_(ev);

	if (ev->ev_flags & EVLIST_INSERTED)
		flags |= (ev->ev_events & (EV_READ|EV_WRITE|EV_CLOSED|EV_SIGNAL));
	if (ev->ev_flags & (EVLIST_ACTIVE|EVLIST_ACTIVE_LATER))
		flags |= ev->ev_res;
	if (ev->ev_flags & EVLIST_TIMEOUT)
		flags |= EV_TIMEOUT;

	event &= (EV_TIMEOUT|EV_READ|EV_WRITE|EV_CLOSED|EV_SIGNAL);

	/* See if there is a timeout that we should report */
	if (tv != NULL && (flags & event & EV_TIMEOUT)) {
		struct timeval tmp = ev->ev_timeout;
		tmp.tv_usec &= MICROSECONDS_MASK;
		/* correctly remap to real time */
		evutil_timeradd(&ev->ev_base->tv_clock_diff, &tmp, tv);
	}

	EVBASE_RELEASE_LOCK(ev->ev_base, th_base_lock);

	return (flags & event);
}
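/* Illustrative sketch (not part of the library): querying an event with
 * event_pending().  When the timeval pointer is non-NULL and EV_TIMEOUT is
 * both requested and pending, it receives the expiry time.  The helper name
 * is made up for the example. */
#if 0
static void
example_report_pending(const struct event *ev)
{
	struct timeval expiry;
	int what = event_pending(ev, EV_READ|EV_WRITE|EV_TIMEOUT, &expiry);

	if (what & EV_READ)
		printf("waiting for readability\n");
	if (what & EV_TIMEOUT)
		printf("timeout at %ld.%06ld\n",
		    (long)expiry.tv_sec, (long)expiry.tv_usec);
}
#endif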
2356 event_initialized(const struct event
*ev
)
2358 if (!(ev
->ev_flags
& EVLIST_INIT
))
2365 event_get_assignment(const struct event
*event
, struct event_base
**base_out
, evutil_socket_t
*fd_out
, short *events_out
, event_callback_fn
*callback_out
, void **arg_out
)
2367 event_debug_assert_is_setup_(event
);
2370 *base_out
= event
->ev_base
;
2372 *fd_out
= event
->ev_fd
;
2374 *events_out
= event
->ev_events
;
2376 *callback_out
= event
->ev_callback
;
2378 *arg_out
= event
->ev_arg
;
2382 event_get_struct_event_size(void)
2384 return sizeof(struct event
);
2388 event_get_fd(const struct event
*ev
)
2390 event_debug_assert_is_setup_(ev
);
2395 event_get_base(const struct event
*ev
)
2397 event_debug_assert_is_setup_(ev
);
2402 event_get_events(const struct event
*ev
)
2404 event_debug_assert_is_setup_(ev
);
2405 return ev
->ev_events
;
2409 event_get_callback(const struct event
*ev
)
2411 event_debug_assert_is_setup_(ev
);
2412 return ev
->ev_callback
;
2416 event_get_callback_arg(const struct event
*ev
)
2418 event_debug_assert_is_setup_(ev
);
2423 event_get_priority(const struct event
*ev
)
2425 event_debug_assert_is_setup_(ev
);
int
event_add(struct event *ev, const struct timeval *tv)
{
	int res;

	if (EVUTIL_FAILURE_CHECK(!ev->ev_base)) {
		event_warnx("%s: event has no event_base set.", __func__);
		return -1;
	}

	EVBASE_ACQUIRE_LOCK(ev->ev_base, th_base_lock);

	res = event_add_nolock_(ev, tv, 0);

	EVBASE_RELEASE_LOCK(ev->ev_base, th_base_lock);

	return (res);
}
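/* Illustrative sketch (not part of the library): event_add() with a non-NULL
 * timeval arms (or re-arms) a timeout relative to now; evtimer_new() and
 * evtimer_add() from event2/event.h are the usual shorthands for pure timers.
 * The callback name is made up; freeing the event afterwards is omitted. */
#if 0
static void
example_timer_cb(evutil_socket_t fd, short what, void *arg)
{
	(void)fd; (void)what; (void)arg;	/* fires once, ~2.5s later */
}

static int
example_arm_timer(struct event_base *base)
{
	struct timeval tv = { 2, 500000 };	/* 2.5 seconds */
	struct event *timer = evtimer_new(base, example_timer_cb, NULL);
	if (timer == NULL)
		return -1;
	return evtimer_add(timer, &tv);
}
#endif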
/* Helper callback: wake an event_base from another thread.  This version
 * works by writing a byte to one end of a socketpair, so that the event_base
 * listening on the other end will wake up as the corresponding event
 * triggers */
static int
evthread_notify_base_default(struct event_base *base)
{
	char buf[1];
	int r;
	buf[0] = (char) 0;
#ifdef _WIN32
	r = send(base->th_notify_fd[1], buf, 1, 0);
#else
	r = write(base->th_notify_fd[1], buf, 1);
#endif
	return (r < 0 && ! EVUTIL_ERR_IS_EAGAIN(errno)) ? -1 : 0;
}
2466 #ifdef EVENT__HAVE_EVENTFD
2467 /* Helper callback: wake an event_base from another thread. This version
2468 * assumes that you have a working eventfd() implementation. */
2470 evthread_notify_base_eventfd(struct event_base
*base
)
2472 ev_uint64_t msg
= 1;
2475 r
= write(base
->th_notify_fd
[0], (void*) &msg
, sizeof(msg
));
2476 } while (r
< 0 && errno
== EAGAIN
);
2478 return (r
< 0) ? -1 : 0;
2483 /** Tell the thread currently running the event_loop for base (if any) that it
2484 * needs to stop waiting in its dispatch function (if it is) and process all
2485 * active callbacks. */
2487 evthread_notify_base(struct event_base
*base
)
2489 EVENT_BASE_ASSERT_LOCKED(base
);
2490 if (!base
->th_notify_fn
)
2492 if (base
->is_notify_pending
)
2494 base
->is_notify_pending
= 1;
2495 return base
->th_notify_fn(base
);
/* Implementation function to remove a timeout on a currently pending event.
 */
int
event_remove_timer_nolock_(struct event *ev)
{
	struct event_base *base = ev->ev_base;

	EVENT_BASE_ASSERT_LOCKED(base);
	event_debug_assert_is_setup_(ev);

	event_debug(("event_remove_timer_nolock: event: %p", ev));

	/* If it's not pending on a timeout, we don't need to do anything. */
	if (ev->ev_flags & EVLIST_TIMEOUT) {
		event_queue_remove_timeout(base, ev);
		evutil_timerclear(&ev->ev_.ev_io.ev_timeout);
	}

	return (0);
}
2520 event_remove_timer(struct event
*ev
)
2524 if (EVUTIL_FAILURE_CHECK(!ev
->ev_base
)) {
2525 event_warnx("%s: event has no event_base set.", __func__
);
2529 EVBASE_ACQUIRE_LOCK(ev
->ev_base
, th_base_lock
);
2531 res
= event_remove_timer_nolock_(ev
);
2533 EVBASE_RELEASE_LOCK(ev
->ev_base
, th_base_lock
);
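/* Illustrative sketch (not part of the library): event_remove_timer() drops
 * only the timeout of a pending event, leaving its I/O or signal registration
 * intact, which avoids an event_del()/event_add() round trip.  The helper
 * name is made up for the example. */
#if 0
static void
example_cancel_deadline(struct event *ev)
{
	/* 'ev' stays pending for EV_READ etc.; it just no longer times out. */
	event_remove_timer(ev);
}
#endif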
/* Implementation function to add an event.  Works just like event_add,
 * except: 1) it requires that we have the lock.  2) if tv_is_absolute is set,
 * we treat tv as an absolute time, not as an interval to add to the current
 * time */
int
event_add_nolock_(struct event *ev, const struct timeval *tv,
    int tv_is_absolute)
{
	struct event_base *base = ev->ev_base;
	int res = 0;
	int notify = 0;
2550 EVENT_BASE_ASSERT_LOCKED(base
);
2551 event_debug_assert_is_setup_(ev
);
2554 "event_add: event: %p (fd "EV_SOCK_FMT
"), %s%s%s%scall %p",
2556 EV_SOCK_ARG(ev
->ev_fd
),
2557 ev
->ev_events
& EV_READ
? "EV_READ " : " ",
2558 ev
->ev_events
& EV_WRITE
? "EV_WRITE " : " ",
2559 ev
->ev_events
& EV_CLOSED
? "EV_CLOSED " : " ",
2560 tv
? "EV_TIMEOUT " : " ",
2563 EVUTIL_ASSERT(!(ev
->ev_flags
& ~EVLIST_ALL
));
2565 if (ev
->ev_flags
& EVLIST_FINALIZING
) {
2571 * prepare for timeout insertion further below, if we get a
2572 * failure on any step, we should not change any state.
2574 if (tv
!= NULL
&& !(ev
->ev_flags
& EVLIST_TIMEOUT
)) {
2575 if (min_heap_reserve_(&base
->timeheap
,
2576 1 + min_heap_size_(&base
->timeheap
)) == -1)
2577 return (-1); /* ENOMEM == errno */
2580 /* If the main thread is currently executing a signal event's
2581 * callback, and we are not the main thread, then we want to wait
2582 * until the callback is done before we mess with the event, or else
2583 * we can race on ev_ncalls and ev_pncalls below. */
2584 #ifndef EVENT__DISABLE_THREAD_SUPPORT
2585 if (base
->current_event
== event_to_event_callback(ev
) &&
2586 (ev
->ev_events
& EV_SIGNAL
)
2587 && !EVBASE_IN_THREAD(base
)) {
2588 ++base
->current_event_waiters
;
2589 EVTHREAD_COND_WAIT(base
->current_event_cond
, base
->th_base_lock
);
2593 if ((ev
->ev_events
& (EV_READ
|EV_WRITE
|EV_CLOSED
|EV_SIGNAL
)) &&
2594 !(ev
->ev_flags
& (EVLIST_INSERTED
|EVLIST_ACTIVE
|EVLIST_ACTIVE_LATER
))) {
2595 if (ev
->ev_events
& (EV_READ
|EV_WRITE
|EV_CLOSED
))
2596 res
= evmap_io_add_(base
, ev
->ev_fd
, ev
);
2597 else if (ev
->ev_events
& EV_SIGNAL
)
2598 res
= evmap_signal_add_(base
, (int)ev
->ev_fd
, ev
);
2600 event_queue_insert_inserted(base
, ev
);
2602 /* evmap says we need to notify the main thread. */
2609 * we should change the timeout state only if the previous event
2610 * addition succeeded.
2612 if (res
!= -1 && tv
!= NULL
) {
2615 #ifdef USE_REINSERT_TIMEOUT
2617 int old_timeout_idx
;
2621 * for persistent timeout events, we remember the
2622 * timeout value and re-add the event.
2624 * If tv_is_absolute, this was already set.
2626 if (ev
->ev_closure
== EV_CLOSURE_EVENT_PERSIST
&& !tv_is_absolute
)
2627 ev
->ev_io_timeout
= *tv
;
2629 #ifndef USE_REINSERT_TIMEOUT
2630 if (ev
->ev_flags
& EVLIST_TIMEOUT
) {
2631 event_queue_remove_timeout(base
, ev
);
2635 /* Check if it is active due to a timeout. Rescheduling
2636 * this timeout before the callback can be executed
2637 * removes it from the active list. */
2638 if ((ev
->ev_flags
& EVLIST_ACTIVE
) &&
2639 (ev
->ev_res
& EV_TIMEOUT
)) {
2640 if (ev
->ev_events
& EV_SIGNAL
) {
2641 /* See if we are just active executing
2642 * this event in a loop
2644 if (ev
->ev_ncalls
&& ev
->ev_pncalls
) {
2646 *ev
->ev_pncalls
= 0;
2650 event_queue_remove_active(base
, event_to_event_callback(ev
));
2653 gettime(base
, &now
);
2655 common_timeout
= is_common_timeout(tv
, base
);
2656 #ifdef USE_REINSERT_TIMEOUT
2657 was_common
= is_common_timeout(&ev
->ev_timeout
, base
);
2658 old_timeout_idx
= COMMON_TIMEOUT_IDX(&ev
->ev_timeout
);
2661 if (tv_is_absolute
) {
2662 ev
->ev_timeout
= *tv
;
2663 } else if (common_timeout
) {
2664 struct timeval tmp
= *tv
;
2665 tmp
.tv_usec
&= MICROSECONDS_MASK
;
2666 evutil_timeradd(&now
, &tmp
, &ev
->ev_timeout
);
2667 ev
->ev_timeout
.tv_usec
|=
2668 (tv
->tv_usec
& ~MICROSECONDS_MASK
);
2670 evutil_timeradd(&now
, tv
, &ev
->ev_timeout
);
2674 "event_add: event %p, timeout in %d seconds %d useconds, call %p",
2675 ev
, (int)tv
->tv_sec
, (int)tv
->tv_usec
, ev
->ev_callback
));
2677 #ifdef USE_REINSERT_TIMEOUT
2678 event_queue_reinsert_timeout(base
, ev
, was_common
, common_timeout
, old_timeout_idx
);
2680 event_queue_insert_timeout(base
, ev
);
2683 if (common_timeout
) {
2684 struct common_timeout_list
*ctl
=
2685 get_common_timeout_list(base
, &ev
->ev_timeout
);
2686 if (ev
== TAILQ_FIRST(&ctl
->events
)) {
2687 common_timeout_schedule(ctl
, &now
, ev
);
2690 struct event
* top
= NULL
;
2691 /* See if the earliest timeout is now earlier than it
2692 * was before: if so, we will need to tell the main
2693 * thread to wake up earlier than it would otherwise.
2694 * We double check the timeout of the top element to
2695 * handle time distortions due to system suspension.
2697 if (min_heap_elt_is_top_(ev
))
2699 else if ((top
= min_heap_top_(&base
->timeheap
)) != NULL
&&
2700 evutil_timercmp(&top
->ev_timeout
, &now
, <))
2705 /* if we are not in the right thread, we need to wake up the loop */
2706 if (res
!= -1 && notify
&& EVBASE_NEED_NOTIFY(base
))
2707 evthread_notify_base(base
);
2709 event_debug_note_add_(ev
);
2715 event_del_(struct event
*ev
, int blocking
)
2719 if (EVUTIL_FAILURE_CHECK(!ev
->ev_base
)) {
2720 event_warnx("%s: event has no event_base set.", __func__
);
2724 EVBASE_ACQUIRE_LOCK(ev
->ev_base
, th_base_lock
);
2726 res
= event_del_nolock_(ev
, blocking
);
2728 EVBASE_RELEASE_LOCK(ev
->ev_base
, th_base_lock
);
2734 event_del(struct event
*ev
)
2736 return event_del_(ev
, EVENT_DEL_AUTOBLOCK
);
2740 event_del_block(struct event
*ev
)
2742 return event_del_(ev
, EVENT_DEL_BLOCK
);
2746 event_del_noblock(struct event
*ev
)
2748 return event_del_(ev
, EVENT_DEL_NOBLOCK
);
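/* Illustrative sketch (not part of the library): choosing between the
 * event_del() variants when removing an event from a thread other than the
 * one running the loop.  The helper name is made up for the example. */
#if 0
static void
example_remove(struct event *ev)
{
	/* Waits for a callback that is running right now in the loop thread
	 * to finish, so the callback argument is safe to free afterwards. */
	event_del_block(ev);

	/* The non-blocking variant returns immediately even if the callback
	 * is mid-run; pair it with EV_FINALIZE / event_free_finalize() for
	 * safe teardown:
	 *	event_del_noblock(ev);
	 */
}
#endif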
2751 /** Helper for event_del: always called with th_base_lock held.
2753 * "blocking" must be one of the EVENT_DEL_{BLOCK, NOBLOCK, AUTOBLOCK,
2754 * EVEN_IF_FINALIZING} values. See those for more information.
2757 event_del_nolock_(struct event
*ev
, int blocking
)
2759 struct event_base
*base
;
2760 int res
= 0, notify
= 0;
2762 event_debug(("event_del: %p (fd "EV_SOCK_FMT
"), callback %p",
2763 ev
, EV_SOCK_ARG(ev
->ev_fd
), ev
->ev_callback
));
2765 /* An event without a base has not been added */
2766 if (ev
->ev_base
== NULL
)
2769 EVENT_BASE_ASSERT_LOCKED(ev
->ev_base
);
2771 if (blocking
!= EVENT_DEL_EVEN_IF_FINALIZING
) {
2772 if (ev
->ev_flags
& EVLIST_FINALIZING
) {
2778 /* If the main thread is currently executing this event's callback,
2779 * and we are not the main thread, then we want to wait until the
2780 * callback is done before we start removing the event. That way,
2781 * when this function returns, it will be safe to free the
2782 * user-supplied argument. */
2784 #ifndef EVENT__DISABLE_THREAD_SUPPORT
2785 if (blocking
!= EVENT_DEL_NOBLOCK
&&
2786 base
->current_event
== event_to_event_callback(ev
) &&
2787 !EVBASE_IN_THREAD(base
) &&
2788 (blocking
== EVENT_DEL_BLOCK
|| !(ev
->ev_events
& EV_FINALIZE
))) {
2789 ++base
->current_event_waiters
;
2790 EVTHREAD_COND_WAIT(base
->current_event_cond
, base
->th_base_lock
);
2794 EVUTIL_ASSERT(!(ev
->ev_flags
& ~EVLIST_ALL
));
2796 /* See if we are just active executing this event in a loop */
2797 if (ev
->ev_events
& EV_SIGNAL
) {
2798 if (ev
->ev_ncalls
&& ev
->ev_pncalls
) {
2800 *ev
->ev_pncalls
= 0;
2804 if (ev
->ev_flags
& EVLIST_TIMEOUT
) {
2805 /* NOTE: We never need to notify the main thread because of a
2806 * deleted timeout event: all that could happen if we don't is
2807 * that the dispatch loop might wake up too early. But the
2808 * point of notifying the main thread _is_ to wake up the
2809 * dispatch loop early anyway, so we wouldn't gain anything by
2812 event_queue_remove_timeout(base
, ev
);
2815 if (ev
->ev_flags
& EVLIST_ACTIVE
)
2816 event_queue_remove_active(base
, event_to_event_callback(ev
));
2817 else if (ev
->ev_flags
& EVLIST_ACTIVE_LATER
)
2818 event_queue_remove_active_later(base
, event_to_event_callback(ev
));
2820 if (ev
->ev_flags
& EVLIST_INSERTED
) {
2821 event_queue_remove_inserted(base
, ev
);
2822 if (ev
->ev_events
& (EV_READ
|EV_WRITE
|EV_CLOSED
))
2823 res
= evmap_io_del_(base
, ev
->ev_fd
, ev
);
2825 res
= evmap_signal_del_(base
, (int)ev
->ev_fd
, ev
);
2827 /* evmap says we need to notify the main thread. */
2833 /* if we are not in the right thread, we need to wake up the loop */
2834 if (res
!= -1 && notify
&& EVBASE_NEED_NOTIFY(base
))
2835 evthread_notify_base(base
);
2837 event_debug_note_del_(ev
);
2843 event_active(struct event
*ev
, int res
, short ncalls
)
2845 if (EVUTIL_FAILURE_CHECK(!ev
->ev_base
)) {
2846 event_warnx("%s: event has no event_base set.", __func__
);
2850 EVBASE_ACQUIRE_LOCK(ev
->ev_base
, th_base_lock
);
2852 event_debug_assert_is_setup_(ev
);
2854 event_active_nolock_(ev
, res
, ncalls
);
2856 EVBASE_RELEASE_LOCK(ev
->ev_base
, th_base_lock
);
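/* Illustrative sketch (not part of the library): with threading support
 * enabled, event_active() may be called from another thread; it takes the
 * base lock and, via evthread_notify_base(), wakes the dispatch loop so the
 * callback runs promptly.  The helper name is made up for the example. */
#if 0
static void
example_poke_from_worker(struct event *wakeup_ev)
{
	/* The callback sees 'res' (here EV_READ) in its 'what' argument;
	 * ncalls is only meaningful for signal events. */
	event_active(wakeup_ev, EV_READ, 0);
}
#endif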
2861 event_active_nolock_(struct event
*ev
, int res
, short ncalls
)
2863 struct event_base
*base
;
2865 event_debug(("event_active: %p (fd "EV_SOCK_FMT
"), res %d, callback %p",
2866 ev
, EV_SOCK_ARG(ev
->ev_fd
), (int)res
, ev
->ev_callback
));
2869 EVENT_BASE_ASSERT_LOCKED(base
);
2871 if (ev
->ev_flags
& EVLIST_FINALIZING
) {
2876 switch ((ev
->ev_flags
& (EVLIST_ACTIVE
|EVLIST_ACTIVE_LATER
))) {
2878 case EVLIST_ACTIVE
|EVLIST_ACTIVE_LATER
:
2882 /* We get different kinds of events, add them together */
2885 case EVLIST_ACTIVE_LATER
:
2893 if (ev
->ev_pri
< base
->event_running_priority
)
2894 base
->event_continue
= 1;
2896 if (ev
->ev_events
& EV_SIGNAL
) {
2897 #ifndef EVENT__DISABLE_THREAD_SUPPORT
2898 if (base
->current_event
== event_to_event_callback(ev
) &&
2899 !EVBASE_IN_THREAD(base
)) {
2900 ++base
->current_event_waiters
;
2901 EVTHREAD_COND_WAIT(base
->current_event_cond
, base
->th_base_lock
);
2904 ev
->ev_ncalls
= ncalls
;
2905 ev
->ev_pncalls
= NULL
;
2908 event_callback_activate_nolock_(base
, event_to_event_callback(ev
));
2912 event_active_later_(struct event
*ev
, int res
)
2914 EVBASE_ACQUIRE_LOCK(ev
->ev_base
, th_base_lock
);
2915 event_active_later_nolock_(ev
, res
);
2916 EVBASE_RELEASE_LOCK(ev
->ev_base
, th_base_lock
);
2920 event_active_later_nolock_(struct event
*ev
, int res
)
2922 struct event_base
*base
= ev
->ev_base
;
2923 EVENT_BASE_ASSERT_LOCKED(base
);
2925 if (ev
->ev_flags
& (EVLIST_ACTIVE
|EVLIST_ACTIVE_LATER
)) {
2926 /* We get different kinds of events, add them together */
2933 event_callback_activate_later_nolock_(base
, event_to_event_callback(ev
));
2937 event_callback_activate_(struct event_base
*base
,
2938 struct event_callback
*evcb
)
2941 EVBASE_ACQUIRE_LOCK(base
, th_base_lock
);
2942 r
= event_callback_activate_nolock_(base
, evcb
);
2943 EVBASE_RELEASE_LOCK(base
, th_base_lock
);
2948 event_callback_activate_nolock_(struct event_base
*base
,
2949 struct event_callback
*evcb
)
2953 if (evcb
->evcb_flags
& EVLIST_FINALIZING
)
2956 switch (evcb
->evcb_flags
& (EVLIST_ACTIVE
|EVLIST_ACTIVE_LATER
)) {
2959 case EVLIST_ACTIVE_LATER
:
2960 event_queue_remove_active_later(base
, evcb
);
2969 event_queue_insert_active(base
, evcb
);
2971 if (EVBASE_NEED_NOTIFY(base
))
2972 evthread_notify_base(base
);
2978 event_callback_activate_later_nolock_(struct event_base
*base
,
2979 struct event_callback
*evcb
)
2981 if (evcb
->evcb_flags
& (EVLIST_ACTIVE
|EVLIST_ACTIVE_LATER
))
2984 event_queue_insert_active_later(base
, evcb
);
2985 if (EVBASE_NEED_NOTIFY(base
))
2986 evthread_notify_base(base
);
2991 event_callback_init_(struct event_base
*base
,
2992 struct event_callback
*cb
)
2994 memset(cb
, 0, sizeof(*cb
));
2995 cb
->evcb_pri
= base
->nactivequeues
- 1;
2999 event_callback_cancel_(struct event_base
*base
,
3000 struct event_callback
*evcb
)
3003 EVBASE_ACQUIRE_LOCK(base
, th_base_lock
);
3004 r
= event_callback_cancel_nolock_(base
, evcb
, 0);
3005 EVBASE_RELEASE_LOCK(base
, th_base_lock
);
3010 event_callback_cancel_nolock_(struct event_base
*base
,
3011 struct event_callback
*evcb
, int even_if_finalizing
)
3013 if ((evcb
->evcb_flags
& EVLIST_FINALIZING
) && !even_if_finalizing
)
3016 if (evcb
->evcb_flags
& EVLIST_INIT
)
3017 return event_del_nolock_(event_callback_to_event(evcb
),
3018 even_if_finalizing
? EVENT_DEL_EVEN_IF_FINALIZING
: EVENT_DEL_AUTOBLOCK
);
3020 switch ((evcb
->evcb_flags
& (EVLIST_ACTIVE
|EVLIST_ACTIVE_LATER
))) {
3022 case EVLIST_ACTIVE
|EVLIST_ACTIVE_LATER
:
3026 /* We get different kinds of events, add them together */
3027 event_queue_remove_active(base
, evcb
);
3029 case EVLIST_ACTIVE_LATER
:
3030 event_queue_remove_active_later(base
, evcb
);
3040 event_deferred_cb_init_(struct event_callback
*cb
, ev_uint8_t priority
, deferred_cb_fn fn
, void *arg
)
3042 memset(cb
, 0, sizeof(*cb
));
3043 cb
->evcb_cb_union
.evcb_selfcb
= fn
;
3045 cb
->evcb_pri
= priority
;
3046 cb
->evcb_closure
= EV_CLOSURE_CB_SELF
;
3050 event_deferred_cb_set_priority_(struct event_callback
*cb
, ev_uint8_t priority
)
3052 cb
->evcb_pri
= priority
;
3056 event_deferred_cb_cancel_(struct event_base
*base
, struct event_callback
*cb
)
3059 base
= current_base
;
3060 event_callback_cancel_(base
, cb
);
3063 #define MAX_DEFERREDS_QUEUED 32
3065 event_deferred_cb_schedule_(struct event_base
*base
, struct event_callback
*cb
)
3069 base
= current_base
;
3070 EVBASE_ACQUIRE_LOCK(base
, th_base_lock
);
3071 if (base
->n_deferreds_queued
> MAX_DEFERREDS_QUEUED
) {
3072 r
= event_callback_activate_later_nolock_(base
, cb
);
3074 r
= event_callback_activate_nolock_(base
, cb
);
3076 ++base
->n_deferreds_queued
;
3079 EVBASE_RELEASE_LOCK(base
, th_base_lock
);
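/* Illustrative sketch (not part of the public API): how other libevent
 * modules queue a deferred callback.  The MAX_DEFERREDS_QUEUED cap above
 * pushes extra work onto the "active later" queue so one burst cannot starve
 * the current loop pass.  Function names below are made up for the example. */
#if 0
static void
example_deferred_fn(struct event_callback *cb, void *arg)
{
	(void)cb; (void)arg;	/* runs from the event loop, lock not held */
}

static void
example_schedule(struct event_base *base, struct event_callback *cb)
{
	event_deferred_cb_init_(cb, 0, example_deferred_fn, NULL);
	event_deferred_cb_schedule_(base, cb);
}
#endif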
3084 timeout_next(struct event_base
*base
, struct timeval
**tv_p
)
3086 /* Caller must hold th_base_lock */
3089 struct timeval
*tv
= *tv_p
;
3092 ev
= min_heap_top_(&base
->timeheap
);
3095 /* if no time-based events are active wait for I/O */
3100 if (gettime(base
, &now
) == -1) {
3105 if (evutil_timercmp(&ev
->ev_timeout
, &now
, <=)) {
3106 evutil_timerclear(tv
);
3110 evutil_timersub(&ev
->ev_timeout
, &now
, tv
);
3112 EVUTIL_ASSERT(tv
->tv_sec
>= 0);
3113 EVUTIL_ASSERT(tv
->tv_usec
>= 0);
3114 event_debug(("timeout_next: event: %p, in %d seconds, %d useconds", ev
, (int)tv
->tv_sec
, (int)tv
->tv_usec
));
3120 /* Activate every event whose timeout has elapsed. */
3122 timeout_process(struct event_base
*base
)
3124 /* Caller must hold lock. */
3128 if (min_heap_empty_(&base
->timeheap
)) {
3132 gettime(base
, &now
);
3134 while ((ev
= min_heap_top_(&base
->timeheap
))) {
3135 if (evutil_timercmp(&ev
->ev_timeout
, &now
, >))
3138 /* delete this event from the I/O queues */
3139 event_del_nolock_(ev
, EVENT_DEL_NOBLOCK
);
3141 event_debug(("timeout_process: event: %p, call %p",
3142 ev
, ev
->ev_callback
));
3143 event_active_nolock_(ev
, EV_TIMEOUT
, 1);
3147 #if (EVLIST_INTERNAL >> 4) != 1
3148 #error "Mismatch for value of EVLIST_INTERNAL"
3152 #define MAX(a,b) (((a)>(b))?(a):(b))
3155 #define MAX_EVENT_COUNT(var, v) var = MAX(var, v)
3157 /* These are a fancy way to spell
3158 if (flags & EVLIST_INTERNAL)
3159 base->event_count--/++;
3161 #define DECR_EVENT_COUNT(base,flags) \
3162 ((base)->event_count -= (~((flags) >> 4) & 1))
3163 #define INCR_EVENT_COUNT(base,flags) do { \
3164 ((base)->event_count += (~((flags) >> 4) & 1)); \
3165 MAX_EVENT_COUNT((base)->event_count_max, (base)->event_count); \
3169 event_queue_remove_inserted(struct event_base
*base
, struct event
*ev
)
3171 EVENT_BASE_ASSERT_LOCKED(base
);
3172 if (EVUTIL_FAILURE_CHECK(!(ev
->ev_flags
& EVLIST_INSERTED
))) {
3173 event_errx(1, "%s: %p(fd "EV_SOCK_FMT
") not on queue %x", __func__
,
3174 ev
, EV_SOCK_ARG(ev
->ev_fd
), EVLIST_INSERTED
);
3177 DECR_EVENT_COUNT(base
, ev
->ev_flags
);
3178 ev
->ev_flags
&= ~EVLIST_INSERTED
;
3181 event_queue_remove_active(struct event_base
*base
, struct event_callback
*evcb
)
3183 EVENT_BASE_ASSERT_LOCKED(base
);
3184 if (EVUTIL_FAILURE_CHECK(!(evcb
->evcb_flags
& EVLIST_ACTIVE
))) {
3185 event_errx(1, "%s: %p not on queue %x", __func__
,
3186 evcb
, EVLIST_ACTIVE
);
3189 DECR_EVENT_COUNT(base
, evcb
->evcb_flags
);
3190 evcb
->evcb_flags
&= ~EVLIST_ACTIVE
;
3191 base
->event_count_active
--;
3193 TAILQ_REMOVE(&base
->activequeues
[evcb
->evcb_pri
],
3194 evcb
, evcb_active_next
);
3197 event_queue_remove_active_later(struct event_base
*base
, struct event_callback
*evcb
)
3199 EVENT_BASE_ASSERT_LOCKED(base
);
3200 if (EVUTIL_FAILURE_CHECK(!(evcb
->evcb_flags
& EVLIST_ACTIVE_LATER
))) {
3201 event_errx(1, "%s: %p not on queue %x", __func__
,
3202 evcb
, EVLIST_ACTIVE_LATER
);
3205 DECR_EVENT_COUNT(base
, evcb
->evcb_flags
);
3206 evcb
->evcb_flags
&= ~EVLIST_ACTIVE_LATER
;
3207 base
->event_count_active
--;
3209 TAILQ_REMOVE(&base
->active_later_queue
, evcb
, evcb_active_next
);
3212 event_queue_remove_timeout(struct event_base
*base
, struct event
*ev
)
3214 EVENT_BASE_ASSERT_LOCKED(base
);
3215 if (EVUTIL_FAILURE_CHECK(!(ev
->ev_flags
& EVLIST_TIMEOUT
))) {
3216 event_errx(1, "%s: %p(fd "EV_SOCK_FMT
") not on queue %x", __func__
,
3217 ev
, EV_SOCK_ARG(ev
->ev_fd
), EVLIST_TIMEOUT
);
3220 DECR_EVENT_COUNT(base
, ev
->ev_flags
);
3221 ev
->ev_flags
&= ~EVLIST_TIMEOUT
;
3223 if (is_common_timeout(&ev
->ev_timeout
, base
)) {
3224 struct common_timeout_list
*ctl
=
3225 get_common_timeout_list(base
, &ev
->ev_timeout
);
3226 TAILQ_REMOVE(&ctl
->events
, ev
,
3227 ev_timeout_pos
.ev_next_with_common_timeout
);
3229 min_heap_erase_(&base
->timeheap
, ev
);
3233 #ifdef USE_REINSERT_TIMEOUT
3234 /* Remove and reinsert 'ev' into the timeout queue. */
3236 event_queue_reinsert_timeout(struct event_base
*base
, struct event
*ev
,
3237 int was_common
, int is_common
, int old_timeout_idx
)
3239 struct common_timeout_list
*ctl
;
3240 if (!(ev
->ev_flags
& EVLIST_TIMEOUT
)) {
3241 event_queue_insert_timeout(base
, ev
);
3245 switch ((was_common
<<1) | is_common
) {
3246 case 3: /* Changing from one common timeout to another */
3247 ctl
= base
->common_timeout_queues
[old_timeout_idx
];
3248 TAILQ_REMOVE(&ctl
->events
, ev
,
3249 ev_timeout_pos
.ev_next_with_common_timeout
);
3250 ctl
= get_common_timeout_list(base
, &ev
->ev_timeout
);
3251 insert_common_timeout_inorder(ctl
, ev
);
3253 case 2: /* Was common; is no longer common */
3254 ctl
= base
->common_timeout_queues
[old_timeout_idx
];
3255 TAILQ_REMOVE(&ctl
->events
, ev
,
3256 ev_timeout_pos
.ev_next_with_common_timeout
);
3257 min_heap_push_(&base
->timeheap
, ev
);
3259 case 1: /* Wasn't common; has become common. */
3260 min_heap_erase_(&base
->timeheap
, ev
);
3261 ctl
= get_common_timeout_list(base
, &ev
->ev_timeout
);
3262 insert_common_timeout_inorder(ctl
, ev
);
3264 case 0: /* was in heap; is still on heap. */
3265 min_heap_adjust_(&base
->timeheap
, ev
);
3268 EVUTIL_ASSERT(0); /* unreachable */
/* Add 'ev' to the common timeout list in 'ctl'. */
static void
insert_common_timeout_inorder(struct common_timeout_list *ctl,
    struct event *ev)
{
	struct event *e;
	/* By all logic, we should just be able to append 'ev' to the end of
	 * ctl->events, since the timeout on each 'ev' is set to {the common
	 * timeout} + {the time when we add the event}, and so the events
	 * should arrive in order of their timeouts.  But just in case
	 * there's some wacky threading issue going on, we do a search from
	 * the end of the list to find the right insertion point.
	 */
3287 TAILQ_FOREACH_REVERSE(e
, &ctl
->events
,
3288 event_list
, ev_timeout_pos
.ev_next_with_common_timeout
) {
3289 /* This timercmp is a little sneaky, since both ev and e have
3290 * magic values in tv_usec. Fortunately, they ought to have
3291 * the _same_ magic values in tv_usec. Let's assert for that.
3294 is_same_common_timeout(&e
->ev_timeout
, &ev
->ev_timeout
));
3295 if (evutil_timercmp(&ev
->ev_timeout
, &e
->ev_timeout
, >=)) {
3296 TAILQ_INSERT_AFTER(&ctl
->events
, e
, ev
,
3297 ev_timeout_pos
.ev_next_with_common_timeout
);
3301 TAILQ_INSERT_HEAD(&ctl
->events
, ev
,
3302 ev_timeout_pos
.ev_next_with_common_timeout
);
3306 event_queue_insert_inserted(struct event_base
*base
, struct event
*ev
)
3308 EVENT_BASE_ASSERT_LOCKED(base
);
3310 if (EVUTIL_FAILURE_CHECK(ev
->ev_flags
& EVLIST_INSERTED
)) {
3311 event_errx(1, "%s: %p(fd "EV_SOCK_FMT
") already inserted", __func__
,
3312 ev
, EV_SOCK_ARG(ev
->ev_fd
));
3316 INCR_EVENT_COUNT(base
, ev
->ev_flags
);
3318 ev
->ev_flags
|= EVLIST_INSERTED
;
3322 event_queue_insert_active(struct event_base
*base
, struct event_callback
*evcb
)
3324 EVENT_BASE_ASSERT_LOCKED(base
);
3326 if (evcb
->evcb_flags
& EVLIST_ACTIVE
) {
3327 /* Double insertion is possible for active events */
3331 INCR_EVENT_COUNT(base
, evcb
->evcb_flags
);
3333 evcb
->evcb_flags
|= EVLIST_ACTIVE
;
3335 base
->event_count_active
++;
3336 MAX_EVENT_COUNT(base
->event_count_active_max
, base
->event_count_active
);
3337 EVUTIL_ASSERT(evcb
->evcb_pri
< base
->nactivequeues
);
3338 TAILQ_INSERT_TAIL(&base
->activequeues
[evcb
->evcb_pri
],
3339 evcb
, evcb_active_next
);
3343 event_queue_insert_active_later(struct event_base
*base
, struct event_callback
*evcb
)
3345 EVENT_BASE_ASSERT_LOCKED(base
);
3346 if (evcb
->evcb_flags
& (EVLIST_ACTIVE_LATER
|EVLIST_ACTIVE
)) {
3347 /* Double insertion is possible */
3351 INCR_EVENT_COUNT(base
, evcb
->evcb_flags
);
3352 evcb
->evcb_flags
|= EVLIST_ACTIVE_LATER
;
3353 base
->event_count_active
++;
3354 MAX_EVENT_COUNT(base
->event_count_active_max
, base
->event_count_active
);
3355 EVUTIL_ASSERT(evcb
->evcb_pri
< base
->nactivequeues
);
3356 TAILQ_INSERT_TAIL(&base
->active_later_queue
, evcb
, evcb_active_next
);
3360 event_queue_insert_timeout(struct event_base
*base
, struct event
*ev
)
3362 EVENT_BASE_ASSERT_LOCKED(base
);
3364 if (EVUTIL_FAILURE_CHECK(ev
->ev_flags
& EVLIST_TIMEOUT
)) {
3365 event_errx(1, "%s: %p(fd "EV_SOCK_FMT
") already on timeout", __func__
,
3366 ev
, EV_SOCK_ARG(ev
->ev_fd
));
3370 INCR_EVENT_COUNT(base
, ev
->ev_flags
);
3372 ev
->ev_flags
|= EVLIST_TIMEOUT
;
3374 if (is_common_timeout(&ev
->ev_timeout
, base
)) {
3375 struct common_timeout_list
*ctl
=
3376 get_common_timeout_list(base
, &ev
->ev_timeout
);
3377 insert_common_timeout_inorder(ctl
, ev
);
3379 min_heap_push_(&base
->timeheap
, ev
);
3384 event_queue_make_later_events_active(struct event_base
*base
)
3386 struct event_callback
*evcb
;
3387 EVENT_BASE_ASSERT_LOCKED(base
);
3389 while ((evcb
= TAILQ_FIRST(&base
->active_later_queue
))) {
3390 TAILQ_REMOVE(&base
->active_later_queue
, evcb
, evcb_active_next
);
3391 evcb
->evcb_flags
= (evcb
->evcb_flags
& ~EVLIST_ACTIVE_LATER
) | EVLIST_ACTIVE
;
3392 EVUTIL_ASSERT(evcb
->evcb_pri
< base
->nactivequeues
);
3393 TAILQ_INSERT_TAIL(&base
->activequeues
[evcb
->evcb_pri
], evcb
, evcb_active_next
);
3394 base
->n_deferreds_queued
+= (evcb
->evcb_closure
== EV_CLOSURE_CB_SELF
);
3398 /* Functions for debugging */
3401 event_get_version(void)
3403 return (EVENT__VERSION
);
3407 event_get_version_number(void)
3409 return (EVENT__NUMERIC_VERSION
);
/*
 * No thread-safe interface needed - the information should be the same
 * for all threads.
 */
3418 event_get_method(void)
3420 return (current_base
->evsel
->name
);
3423 #ifndef EVENT__DISABLE_MM_REPLACEMENT
3424 static void *(*mm_malloc_fn_
)(size_t sz
) = NULL
;
3425 static void *(*mm_realloc_fn_
)(void *p
, size_t sz
) = NULL
;
3426 static void (*mm_free_fn_
)(void *p
) = NULL
;
3429 event_mm_malloc_(size_t sz
)
3435 return mm_malloc_fn_(sz
);
3441 event_mm_calloc_(size_t count
, size_t size
)
3443 if (count
== 0 || size
== 0)
3446 if (mm_malloc_fn_
) {
3447 size_t sz
= count
* size
;
3449 if (count
> EV_SIZE_MAX
/ size
)
3451 p
= mm_malloc_fn_(sz
);
3453 return memset(p
, 0, sz
);
3455 void *p
= calloc(count
, size
);
3457 /* Windows calloc doesn't reliably set ENOMEM */
3470 event_mm_strdup_(const char *str
)
3477 if (mm_malloc_fn_
) {
3478 size_t ln
= strlen(str
);
3480 if (ln
== EV_SIZE_MAX
)
3482 p
= mm_malloc_fn_(ln
+1);
3484 return memcpy(p
, str
, ln
+1);
3487 return _strdup(str
);
3498 event_mm_realloc_(void *ptr
, size_t sz
)
3501 return mm_realloc_fn_(ptr
, sz
);
3503 return realloc(ptr
, sz
);
3507 event_mm_free_(void *ptr
)
3516 event_set_mem_functions(void *(*malloc_fn
)(size_t sz
),
3517 void *(*realloc_fn
)(void *ptr
, size_t sz
),
3518 void (*free_fn
)(void *ptr
))
3520 mm_malloc_fn_
= malloc_fn
;
3521 mm_realloc_fn_
= realloc_fn
;
3522 mm_free_fn_
= free_fn
;
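/* Illustrative sketch (not part of the library): installing replacement
 * allocators with event_set_mem_functions(), e.g. to count allocations; this
 * must happen before any other libevent call that allocates.  The counter and
 * wrapper names are made up for the example. */
#if 0
static size_t example_total_allocs;

static void *example_malloc(size_t sz)
{
	++example_total_allocs;
	return malloc(sz);
}
static void *example_realloc(void *p, size_t sz)
{
	return realloc(p, sz);
}
static void example_free(void *p)
{
	free(p);
}

static void
example_install_allocators(void)
{
	event_set_mem_functions(example_malloc, example_realloc, example_free);
}
#endif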
3526 #ifdef EVENT__HAVE_EVENTFD
3528 evthread_notify_drain_eventfd(evutil_socket_t fd
, short what
, void *arg
)
3532 struct event_base
*base
= arg
;
3534 r
= read(fd
, (void*) &msg
, sizeof(msg
));
3535 if (r
<0 && errno
!= EAGAIN
) {
3536 event_sock_warn(fd
, "Error reading from eventfd");
3538 EVBASE_ACQUIRE_LOCK(base
, th_base_lock
);
3539 base
->is_notify_pending
= 0;
3540 EVBASE_RELEASE_LOCK(base
, th_base_lock
);
3545 evthread_notify_drain_default(evutil_socket_t fd
, short what
, void *arg
)
3547 unsigned char buf
[1024];
3548 struct event_base
*base
= arg
;
3550 while (recv(fd
, (char*)buf
, sizeof(buf
), 0) > 0)
3553 while (read(fd
, (char*)buf
, sizeof(buf
)) > 0)
3557 EVBASE_ACQUIRE_LOCK(base
, th_base_lock
);
3558 base
->is_notify_pending
= 0;
3559 EVBASE_RELEASE_LOCK(base
, th_base_lock
);
3563 evthread_make_base_notifiable(struct event_base
*base
)
3569 EVBASE_ACQUIRE_LOCK(base
, th_base_lock
);
3570 r
= evthread_make_base_notifiable_nolock_(base
);
3571 EVBASE_RELEASE_LOCK(base
, th_base_lock
);
3576 evthread_make_base_notifiable_nolock_(struct event_base
*base
)
3578 void (*cb
)(evutil_socket_t
, short, void *);
3579 int (*notify
)(struct event_base
*);
3581 if (base
->th_notify_fn
!= NULL
) {
3582 /* The base is already notifiable: we're doing fine. */
3586 #if defined(EVENT__HAVE_WORKING_KQUEUE)
3587 if (base
->evsel
== &kqops
&& event_kq_add_notify_event_(base
) == 0) {
3588 base
->th_notify_fn
= event_kq_notify_base_
;
3589 /* No need to add an event here; the backend can wake
3590 * itself up just fine. */
3595 #ifdef EVENT__HAVE_EVENTFD
3596 base
->th_notify_fd
[0] = evutil_eventfd_(0,
3597 EVUTIL_EFD_CLOEXEC
|EVUTIL_EFD_NONBLOCK
);
3598 if (base
->th_notify_fd
[0] >= 0) {
3599 base
->th_notify_fd
[1] = -1;
3600 notify
= evthread_notify_base_eventfd
;
3601 cb
= evthread_notify_drain_eventfd
;
3604 if (evutil_make_internal_pipe_(base
->th_notify_fd
) == 0) {
3605 notify
= evthread_notify_base_default
;
3606 cb
= evthread_notify_drain_default
;
3611 base
->th_notify_fn
= notify
;
3613 /* prepare an event that we can use for wakeup */
3614 event_assign(&base
->th_notify
, base
, base
->th_notify_fd
[0],
3615 EV_READ
|EV_PERSIST
, cb
, base
);
	/* we need to mark this as an internal event */
3618 base
->th_notify
.ev_flags
|= EVLIST_INTERNAL
;
3619 event_priority_set(&base
->th_notify
, 0);
3621 return event_add_nolock_(&base
->th_notify
, NULL
, 0);
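/* Illustrative sketch (not part of the library): the notification machinery
 * above only comes into play once threading support is enabled.  Calling
 * evthread_use_pthreads() (from event2/thread.h, linked via
 * -levent_pthreads) before creating the base lets cross-thread event_add()
 * and event_active() calls wake the dispatch loop.  The helper name is made
 * up for the example. */
#if 0
static struct event_base *
example_threaded_base(void)
{
	if (evthread_use_pthreads() < 0)
		return NULL;
	return event_base_new();
}
#endif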
3625 event_base_foreach_event_nolock_(struct event_base
*base
,
3626 event_base_foreach_event_cb fn
, void *arg
)
3632 /* Start out with all the EVLIST_INSERTED events. */
3633 if ((r
= evmap_foreach_event_(base
, fn
, arg
)))
	/* Okay, now we deal with those events that have timeouts and are in
	 * the min-heap. */
3638 for (u
= 0; u
< base
->timeheap
.n
; ++u
) {
3639 ev
= base
->timeheap
.p
[u
];
3640 if (ev
->ev_flags
& EVLIST_INSERTED
) {
3641 /* we already processed this one */
3644 if ((r
= fn(base
, ev
, arg
)))
3648 /* Now for the events in one of the timeout queues.
3650 for (i
= 0; i
< base
->n_common_timeouts
; ++i
) {
3651 struct common_timeout_list
*ctl
=
3652 base
->common_timeout_queues
[i
];
3653 TAILQ_FOREACH(ev
, &ctl
->events
,
3654 ev_timeout_pos
.ev_next_with_common_timeout
) {
3655 if (ev
->ev_flags
& EVLIST_INSERTED
) {
3656 /* we already processed this one */
3659 if ((r
= fn(base
, ev
, arg
)))
	/* Finally, we deal with all the active events that we haven't touched
	 * yet. */
3666 for (i
= 0; i
< base
->nactivequeues
; ++i
) {
3667 struct event_callback
*evcb
;
3668 TAILQ_FOREACH(evcb
, &base
->activequeues
[i
], evcb_active_next
) {
3669 if ((evcb
->evcb_flags
& (EVLIST_INIT
|EVLIST_INSERTED
|EVLIST_TIMEOUT
)) != EVLIST_INIT
) {
			/* This isn't an event (evlist_init clear), or
			 * we already processed it (inserted or
			 * timeout set). */
3675 ev
= event_callback_to_event(evcb
);
3676 if ((r
= fn(base
, ev
, arg
)))
3684 /* Helper for event_base_dump_events: called on each event in the event base;
3685 * dumps only the inserted events. */
3687 dump_inserted_event_fn(const struct event_base
*base
, const struct event
*e
, void *arg
)
3690 const char *gloss
= (e
->ev_events
& EV_SIGNAL
) ?
3693 if (! (e
->ev_flags
& (EVLIST_INSERTED
|EVLIST_TIMEOUT
)))
3696 fprintf(output
, " %p [%s "EV_SOCK_FMT
"]%s%s%s%s%s%s",
3697 (void*)e
, gloss
, EV_SOCK_ARG(e
->ev_fd
),
3698 (e
->ev_events
&EV_READ
)?" Read":"",
3699 (e
->ev_events
&EV_WRITE
)?" Write":"",
3700 (e
->ev_events
&EV_CLOSED
)?" EOF":"",
3701 (e
->ev_events
&EV_SIGNAL
)?" Signal":"",
3702 (e
->ev_events
&EV_PERSIST
)?" Persist":"",
3703 (e
->ev_flags
&EVLIST_INTERNAL
)?" Internal":"");
3704 if (e
->ev_flags
& EVLIST_TIMEOUT
) {
3706 tv
.tv_sec
= e
->ev_timeout
.tv_sec
;
3707 tv
.tv_usec
= e
->ev_timeout
.tv_usec
& MICROSECONDS_MASK
;
3708 evutil_timeradd(&tv
, &base
->tv_clock_diff
, &tv
);
3709 fprintf(output
, " Timeout=%ld.%06d",
3710 (long)tv
.tv_sec
, (int)(tv
.tv_usec
& MICROSECONDS_MASK
));
3712 fputc('\n', output
);
3717 /* Helper for event_base_dump_events: called on each event in the event base;
3718 * dumps only the active events. */
3720 dump_active_event_fn(const struct event_base
*base
, const struct event
*e
, void *arg
)
3723 const char *gloss
= (e
->ev_events
& EV_SIGNAL
) ?
3726 if (! (e
->ev_flags
& (EVLIST_ACTIVE
|EVLIST_ACTIVE_LATER
)))
3729 fprintf(output
, " %p [%s "EV_SOCK_FMT
", priority=%d]%s%s%s%s%s active%s%s\n",
3730 (void*)e
, gloss
, EV_SOCK_ARG(e
->ev_fd
), e
->ev_pri
,
3731 (e
->ev_res
&EV_READ
)?" Read":"",
3732 (e
->ev_res
&EV_WRITE
)?" Write":"",
3733 (e
->ev_res
&EV_CLOSED
)?" EOF":"",
3734 (e
->ev_res
&EV_SIGNAL
)?" Signal":"",
3735 (e
->ev_res
&EV_TIMEOUT
)?" Timeout":"",
3736 (e
->ev_flags
&EVLIST_INTERNAL
)?" [Internal]":"",
3737 (e
->ev_flags
&EVLIST_ACTIVE_LATER
)?" [NextTime]":"");
3743 event_base_foreach_event(struct event_base
*base
,
3744 event_base_foreach_event_cb fn
, void *arg
)
3747 if ((!fn
) || (!base
)) {
3750 EVBASE_ACQUIRE_LOCK(base
, th_base_lock
);
3751 r
= event_base_foreach_event_nolock_(base
, fn
, arg
);
3752 EVBASE_RELEASE_LOCK(base
, th_base_lock
);
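/* Illustrative sketch (not part of the library): walking a base with
 * event_base_foreach_event().  Returning non-zero from the callback stops the
 * iteration; the base lock is held for the whole walk, so the callback must
 * not add, remove, or activate events on that base.  Names below are made up
 * for the example. */
#if 0
static int
example_count_cb(const struct event_base *base, const struct event *ev,
    void *arg)
{
	(void)base; (void)ev;
	++*(int *)arg;
	return 0;	/* keep iterating */
}

static int
example_count_events(struct event_base *base)
{
	int count = 0;
	event_base_foreach_event(base, example_count_cb, &count);
	return count;
}
#endif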
3758 event_base_dump_events(struct event_base
*base
, FILE *output
)
3760 EVBASE_ACQUIRE_LOCK(base
, th_base_lock
);
3761 fprintf(output
, "Inserted events:\n");
3762 event_base_foreach_event_nolock_(base
, dump_inserted_event_fn
, output
);
3764 fprintf(output
, "Active events:\n");
3765 event_base_foreach_event_nolock_(base
, dump_active_event_fn
, output
);
3766 EVBASE_RELEASE_LOCK(base
, th_base_lock
);
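/* Illustrative sketch (not part of the library): dumping the inserted and
 * active events of a base to a stream for debugging.  The helper name is made
 * up for the example. */
#if 0
static void
example_dump(struct event_base *base)
{
	event_base_dump_events(base, stderr);
}
#endif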
3770 event_base_active_by_fd(struct event_base
*base
, evutil_socket_t fd
, short events
)
3772 EVBASE_ACQUIRE_LOCK(base
, th_base_lock
);
3773 evmap_io_active_(base
, fd
, events
& (EV_READ
|EV_WRITE
|EV_CLOSED
));
3774 EVBASE_RELEASE_LOCK(base
, th_base_lock
);
3778 event_base_active_by_signal(struct event_base
*base
, int sig
)
3780 EVBASE_ACQUIRE_LOCK(base
, th_base_lock
);
3781 evmap_signal_active_(base
, sig
, 1);
3782 EVBASE_RELEASE_LOCK(base
, th_base_lock
);
3787 event_base_add_virtual_(struct event_base
*base
)
3789 EVBASE_ACQUIRE_LOCK(base
, th_base_lock
);
3790 base
->virtual_event_count
++;
3791 MAX_EVENT_COUNT(base
->virtual_event_count_max
, base
->virtual_event_count
);
3792 EVBASE_RELEASE_LOCK(base
, th_base_lock
);
3796 event_base_del_virtual_(struct event_base
*base
)
3798 EVBASE_ACQUIRE_LOCK(base
, th_base_lock
);
3799 EVUTIL_ASSERT(base
->virtual_event_count
> 0);
3800 base
->virtual_event_count
--;
3801 if (base
->virtual_event_count
== 0 && EVBASE_NEED_NOTIFY(base
))
3802 evthread_notify_base(base
);
3803 EVBASE_RELEASE_LOCK(base
, th_base_lock
);
3807 event_free_debug_globals_locks(void)
3809 #ifndef EVENT__DISABLE_THREAD_SUPPORT
3810 #ifndef EVENT__DISABLE_DEBUG_MODE
3811 if (event_debug_map_lock_
!= NULL
) {
3812 EVTHREAD_FREE_LOCK(event_debug_map_lock_
, 0);
3813 event_debug_map_lock_
= NULL
;
3814 evthreadimpl_disable_lock_debugging_();
3816 #endif /* EVENT__DISABLE_DEBUG_MODE */
3817 #endif /* EVENT__DISABLE_THREAD_SUPPORT */
3822 event_free_debug_globals(void)
3824 event_free_debug_globals_locks();
3828 event_free_evsig_globals(void)
3830 evsig_free_globals_();
3834 event_free_evutil_globals(void)
3836 evutil_free_globals_();
3840 event_free_globals(void)
3842 event_free_debug_globals();
3843 event_free_evsig_globals();
3844 event_free_evutil_globals();
3848 libevent_global_shutdown(void)
3850 event_disable_debug_mode();
3851 event_free_globals();
3854 #ifndef EVENT__DISABLE_THREAD_SUPPORT
3856 event_global_setup_locks_(const int enable_locks
)
3858 #ifndef EVENT__DISABLE_DEBUG_MODE
3859 EVTHREAD_SETUP_GLOBAL_LOCK(event_debug_map_lock_
, 0);
3861 if (evsig_global_setup_locks_(enable_locks
) < 0)
3863 if (evutil_global_setup_locks_(enable_locks
) < 0)
3865 if (evutil_secure_rng_global_setup_locks_(enable_locks
) < 0)
3872 event_base_assert_ok_(struct event_base
*base
)
3874 EVBASE_ACQUIRE_LOCK(base
, th_base_lock
);
3875 event_base_assert_ok_nolock_(base
);
3876 EVBASE_RELEASE_LOCK(base
, th_base_lock
);
3880 event_base_assert_ok_nolock_(struct event_base
*base
)
3885 /* First do checks on the per-fd and per-signal lists */
3886 evmap_check_integrity_(base
);
3888 /* Check the heap property */
3889 for (i
= 1; i
< (int)base
->timeheap
.n
; ++i
) {
3890 int parent
= (i
- 1) / 2;
3891 struct event
*ev
, *p_ev
;
3892 ev
= base
->timeheap
.p
[i
];
3893 p_ev
= base
->timeheap
.p
[parent
];
3894 EVUTIL_ASSERT(ev
->ev_flags
& EVLIST_TIMEOUT
);
3895 EVUTIL_ASSERT(evutil_timercmp(&p_ev
->ev_timeout
, &ev
->ev_timeout
, <=));
3896 EVUTIL_ASSERT(ev
->ev_timeout_pos
.min_heap_idx
== i
);
3899 /* Check that the common timeouts are fine */
3900 for (i
= 0; i
< base
->n_common_timeouts
; ++i
) {
3901 struct common_timeout_list
*ctl
= base
->common_timeout_queues
[i
];
3902 struct event
*last
=NULL
, *ev
;
3904 EVUTIL_ASSERT_TAILQ_OK(&ctl
->events
, event
, ev_timeout_pos
.ev_next_with_common_timeout
);
3906 TAILQ_FOREACH(ev
, &ctl
->events
, ev_timeout_pos
.ev_next_with_common_timeout
) {
3908 EVUTIL_ASSERT(evutil_timercmp(&last
->ev_timeout
, &ev
->ev_timeout
, <=));
3909 EVUTIL_ASSERT(ev
->ev_flags
& EVLIST_TIMEOUT
);
3910 EVUTIL_ASSERT(is_common_timeout(&ev
->ev_timeout
,base
));
3911 EVUTIL_ASSERT(COMMON_TIMEOUT_IDX(&ev
->ev_timeout
) == i
);
3916 /* Check the active queues. */
3918 for (i
= 0; i
< base
->nactivequeues
; ++i
) {
3919 struct event_callback
*evcb
;
3920 EVUTIL_ASSERT_TAILQ_OK(&base
->activequeues
[i
], event_callback
, evcb_active_next
);
3921 TAILQ_FOREACH(evcb
, &base
->activequeues
[i
], evcb_active_next
) {
3922 EVUTIL_ASSERT((evcb
->evcb_flags
& (EVLIST_ACTIVE
|EVLIST_ACTIVE_LATER
)) == EVLIST_ACTIVE
);
3923 EVUTIL_ASSERT(evcb
->evcb_pri
== i
);
3929 struct event_callback
*evcb
;
3930 TAILQ_FOREACH(evcb
, &base
->active_later_queue
, evcb_active_next
) {
3931 EVUTIL_ASSERT((evcb
->evcb_flags
& (EVLIST_ACTIVE
|EVLIST_ACTIVE_LATER
)) == EVLIST_ACTIVE_LATER
);
3935 EVUTIL_ASSERT(count
== base
->event_count_active
);