/*
 * Copyright (c) 2000-2007 Niels Provos <provos@citi.umich.edu>
 * Copyright (c) 2007-2012 Niels Provos and Nick Mathewson
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include "event2/event-config.h"

#ifdef WIN32
#include <winsock2.h>
#define WIN32_LEAN_AND_MEAN
#include <windows.h>
#undef WIN32_LEAN_AND_MEAN
#endif
#include <sys/types.h>
#if !defined(WIN32) && defined(_EVENT_HAVE_SYS_TIME_H)
#include <sys/time.h>
#endif
#include <sys/queue.h>
#ifdef _EVENT_HAVE_SYS_SOCKET_H
#include <sys/socket.h>
#endif
#ifdef _EVENT_HAVE_UNISTD_H
#include <unistd.h>
#endif
#ifdef _EVENT_HAVE_SYS_EVENTFD_H
#include <sys/eventfd.h>
#endif

#include "event2/event.h"
#include "event2/event_struct.h"
#include "event2/event_compat.h"
#include "event-internal.h"
#include "defer-internal.h"
#include "evthread-internal.h"
#include "event2/thread.h"
#include "event2/util.h"
#include "log-internal.h"
#include "evmap-internal.h"
#include "iocp-internal.h"
#include "changelist-internal.h"
#include "ht-internal.h"
#include "util-internal.h"
#ifdef _EVENT_HAVE_EVENT_PORTS
extern const struct eventop evportops;
#endif
#ifdef _EVENT_HAVE_SELECT
extern const struct eventop selectops;
#endif
#ifdef _EVENT_HAVE_POLL
extern const struct eventop pollops;
#endif
#ifdef _EVENT_HAVE_EPOLL
extern const struct eventop epollops;
#endif
#ifdef _EVENT_HAVE_WORKING_KQUEUE
extern const struct eventop kqops;
#endif
#ifdef _EVENT_HAVE_DEVPOLL
extern const struct eventop devpollops;
#endif
#ifdef WIN32
extern const struct eventop win32ops;
#endif
/* Array of backends in order of preference. */
static const struct eventop *eventops[] = {
#ifdef _EVENT_HAVE_EVENT_PORTS
	&evportops,
#endif
#ifdef _EVENT_HAVE_WORKING_KQUEUE
	&kqops,
#endif
#ifdef _EVENT_HAVE_EPOLL
	&epollops,
#endif
#ifdef _EVENT_HAVE_DEVPOLL
	&devpollops,
#endif
#ifdef _EVENT_HAVE_POLL
	&pollops,
#endif
#ifdef _EVENT_HAVE_SELECT
	&selectops,
#endif
#ifdef WIN32
	&win32ops,
#endif
	NULL
};
/* Global state; deprecated */
struct event_base *event_global_current_base_ = NULL;
#define current_base event_global_current_base_

static int use_monotonic;
static inline int event_add_internal(struct event *ev,
    const struct timeval *tv, int tv_is_absolute);
static inline int event_del_internal(struct event *ev);

static void	event_queue_insert(struct event_base *, struct event *, int);
static void	event_queue_remove(struct event_base *, struct event *, int);
static int	event_haveevents(struct event_base *);

static int	event_process_active(struct event_base *);

static int	timeout_next(struct event_base *, struct timeval **);
static void	timeout_process(struct event_base *);
static void	timeout_correct(struct event_base *, struct timeval *);

static inline void	event_signal_closure(struct event_base *, struct event *ev);
static inline void	event_persist_closure(struct event_base *, struct event *ev);

static int	evthread_notify_base(struct event_base *base);
#ifndef _EVENT_DISABLE_DEBUG_MODE
/* These functions implement a hashtable of which 'struct event *' structures
 * have been setup or added.  We don't want to trust the content of the struct
 * event itself, since we're trying to work through cases where an event gets
 * clobbered or freed.  Instead, we keep a hashtable indexed by the pointer.
 */

struct event_debug_entry {
	HT_ENTRY(event_debug_entry) node;
	const struct event *ptr;
	unsigned added : 1;
};

static inline unsigned
hash_debug_entry(const struct event_debug_entry *e)
{
	/* We need to do this silliness to convince compilers that we
	 * honestly mean to cast e->ptr to an integer, and discard any
	 * part of it that doesn't fit in an unsigned.
	 */
	unsigned u = (unsigned) ((ev_uintptr_t) e->ptr);
	/* Our hashtable implementation is pretty sensitive to low bits,
	 * and every struct event is over 64 bytes in size, so we can
	 * just say >>6. */
	return (u >> 6);
}

static inline int
eq_debug_entry(const struct event_debug_entry *a,
    const struct event_debug_entry *b)
{
	return a->ptr == b->ptr;
}

int _event_debug_mode_on = 0;
/* Set if it's too late to enable event_debug_mode. */
static int event_debug_mode_too_late = 0;
#ifndef _EVENT_DISABLE_THREAD_SUPPORT
static void *_event_debug_map_lock = NULL;
#endif
static HT_HEAD(event_debug_map, event_debug_entry) global_debug_map =
	HT_INITIALIZER();

HT_PROTOTYPE(event_debug_map, event_debug_entry, node, hash_debug_entry,
    eq_debug_entry)
HT_GENERATE(event_debug_map, event_debug_entry, node, hash_debug_entry,
    eq_debug_entry, 0.5, mm_malloc, mm_realloc, mm_free)
196 /* Macro: record that ev is now setup (that is, ready for an add) */
197 #define _event_debug_note_setup(ev) do { \
198 if (_event_debug_mode_on) { \
199 struct event_debug_entry *dent,find; \
201 EVLOCK_LOCK(_event_debug_map_lock, 0); \
202 dent = HT_FIND(event_debug_map, &global_debug_map, &find); \
206 dent = mm_malloc(sizeof(*dent)); \
209 "Out of memory in debugging code"); \
212 HT_INSERT(event_debug_map, &global_debug_map, dent); \
214 EVLOCK_UNLOCK(_event_debug_map_lock, 0); \
216 event_debug_mode_too_late = 1; \
218 /* Macro: record that ev is no longer setup */
219 #define _event_debug_note_teardown(ev) do { \
220 if (_event_debug_mode_on) { \
221 struct event_debug_entry *dent,find; \
223 EVLOCK_LOCK(_event_debug_map_lock, 0); \
224 dent = HT_REMOVE(event_debug_map, &global_debug_map, &find); \
227 EVLOCK_UNLOCK(_event_debug_map_lock, 0); \
229 event_debug_mode_too_late = 1; \
231 /* Macro: record that ev is now added */
232 #define _event_debug_note_add(ev) do { \
233 if (_event_debug_mode_on) { \
234 struct event_debug_entry *dent,find; \
236 EVLOCK_LOCK(_event_debug_map_lock, 0); \
237 dent = HT_FIND(event_debug_map, &global_debug_map, &find); \
241 event_errx(_EVENT_ERR_ABORT, \
242 "%s: noting an add on a non-setup event %p" \
243 " (events: 0x%x, fd: "EV_SOCK_FMT \
245 __func__, (ev), (ev)->ev_events, \
246 EV_SOCK_ARG((ev)->ev_fd), (ev)->ev_flags); \
248 EVLOCK_UNLOCK(_event_debug_map_lock, 0); \
250 event_debug_mode_too_late = 1; \
252 /* Macro: record that ev is no longer added */
253 #define _event_debug_note_del(ev) do { \
254 if (_event_debug_mode_on) { \
255 struct event_debug_entry *dent,find; \
257 EVLOCK_LOCK(_event_debug_map_lock, 0); \
258 dent = HT_FIND(event_debug_map, &global_debug_map, &find); \
262 event_errx(_EVENT_ERR_ABORT, \
263 "%s: noting a del on a non-setup event %p" \
264 " (events: 0x%x, fd: "EV_SOCK_FMT \
266 __func__, (ev), (ev)->ev_events, \
267 EV_SOCK_ARG((ev)->ev_fd), (ev)->ev_flags); \
269 EVLOCK_UNLOCK(_event_debug_map_lock, 0); \
271 event_debug_mode_too_late = 1; \
273 /* Macro: assert that ev is setup (i.e., okay to add or inspect) */
274 #define _event_debug_assert_is_setup(ev) do { \
275 if (_event_debug_mode_on) { \
276 struct event_debug_entry *dent,find; \
278 EVLOCK_LOCK(_event_debug_map_lock, 0); \
279 dent = HT_FIND(event_debug_map, &global_debug_map, &find); \
281 event_errx(_EVENT_ERR_ABORT, \
282 "%s called on a non-initialized event %p" \
283 " (events: 0x%x, fd: "EV_SOCK_FMT\
285 __func__, (ev), (ev)->ev_events, \
286 EV_SOCK_ARG((ev)->ev_fd), (ev)->ev_flags); \
288 EVLOCK_UNLOCK(_event_debug_map_lock, 0); \
291 /* Macro: assert that ev is not added (i.e., okay to tear down or set
293 #define _event_debug_assert_not_added(ev) do { \
294 if (_event_debug_mode_on) { \
295 struct event_debug_entry *dent,find; \
297 EVLOCK_LOCK(_event_debug_map_lock, 0); \
298 dent = HT_FIND(event_debug_map, &global_debug_map, &find); \
299 if (dent && dent->added) { \
300 event_errx(_EVENT_ERR_ABORT, \
301 "%s called on an already added event %p" \
302 " (events: 0x%x, fd: "EV_SOCK_FMT", " \
304 __func__, (ev), (ev)->ev_events, \
305 EV_SOCK_ARG((ev)->ev_fd), (ev)->ev_flags); \
307 EVLOCK_UNLOCK(_event_debug_map_lock, 0); \
311 #define _event_debug_note_setup(ev) \
313 #define _event_debug_note_teardown(ev) \
315 #define _event_debug_note_add(ev) \
317 #define _event_debug_note_del(ev) \
319 #define _event_debug_assert_is_setup(ev) \
321 #define _event_debug_assert_not_added(ev) \
#define EVENT_BASE_ASSERT_LOCKED(base)		\
	EVLOCK_ASSERT_LOCKED((base)->th_base_lock)
/* The first time this function is called, it sets use_monotonic to 1
 * if we have a clock function that supports monotonic time */
static void
detect_monotonic(void)
{
#if defined(_EVENT_HAVE_CLOCK_GETTIME) && defined(CLOCK_MONOTONIC)
	struct timespec	ts;
	static int use_monotonic_initialized = 0;

	if (use_monotonic_initialized)
		return;

	if (clock_gettime(CLOCK_MONOTONIC, &ts) == 0)
		use_monotonic = 1;

	use_monotonic_initialized = 1;
#endif
}
/* How often (in seconds) do we check for changes in wall clock time relative
 * to monotonic time?  Set this to -1 for 'never.' */
#define CLOCK_SYNC_INTERVAL -1

/** Set 'tp' to the current time according to 'base'.  We must hold the lock
 * on 'base'.  If there is a cached time, return it.  Otherwise, use
 * clock_gettime or gettimeofday as appropriate to find out the right time.
 * Return 0 on success, -1 on failure.
 */
static int
gettime(struct event_base *base, struct timeval *tp)
{
	EVENT_BASE_ASSERT_LOCKED(base);

	if (base->tv_cache.tv_sec) {
		*tp = base->tv_cache;
		return (0);
	}

#if defined(_EVENT_HAVE_CLOCK_GETTIME) && defined(CLOCK_MONOTONIC)
	if (use_monotonic) {
		struct timespec	ts;

		if (clock_gettime(CLOCK_MONOTONIC, &ts) == -1)
			return (-1);

		tp->tv_sec = ts.tv_sec;
		tp->tv_usec = ts.tv_nsec / 1000;
		if (base->last_updated_clock_diff + CLOCK_SYNC_INTERVAL
		    < ts.tv_sec) {
			struct timeval tv;
			evutil_gettimeofday(&tv, NULL);
			evutil_timersub(&tv, tp, &base->tv_clock_diff);
			base->last_updated_clock_diff = ts.tv_sec;
		}

		return (0);
	}
#endif

	return (evutil_gettimeofday(tp, NULL));
}
int
event_base_gettimeofday_cached(struct event_base *base, struct timeval *tv)
{
	int r;
	if (!base) {
		base = current_base;
		if (!current_base)
			return evutil_gettimeofday(tv, NULL);
	}

	EVBASE_ACQUIRE_LOCK(base, th_base_lock);
	if (base->tv_cache.tv_sec == 0) {
		r = evutil_gettimeofday(tv, NULL);
	} else {
#if defined(_EVENT_HAVE_CLOCK_GETTIME) && defined(CLOCK_MONOTONIC)
		evutil_timeradd(&base->tv_cache, &base->tv_clock_diff, tv);
#else
		*tv = base->tv_cache;
#endif
		r = 0;
	}
	EVBASE_RELEASE_LOCK(base, th_base_lock);
	return r;
}
/** Make 'base' have no current cached time. */
static inline void
clear_time_cache(struct event_base *base)
{
	base->tv_cache.tv_sec = 0;
}

/** Replace the cached time in 'base' with the current time. */
static inline void
update_time_cache(struct event_base *base)
{
	base->tv_cache.tv_sec = 0;
	if (!(base->flags & EVENT_BASE_FLAG_NO_CACHE_TIME))
		gettime(base, &base->tv_cache);
}
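
/*
 * Illustrative sketch (editor's addition, not part of libevent): how a
 * callback might read the cached time instead of making a syscall.  The
 * callback and base names here are hypothetical.
 *
 *	static void cb(evutil_socket_t fd, short what, void *arg)
 *	{
 *		struct event_base *base = arg;
 *		struct timeval now;
 *		// Returns the time cached at the start of this callback pass,
 *		// unless EVENT_BASE_FLAG_NO_CACHE_TIME is set on the base, in
 *		// which case it falls back to evutil_gettimeofday().
 *		event_base_gettimeofday_cached(base, &now);
 *	}
 */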
struct event_base *
event_init(void)
{
	struct event_base *base = event_base_new_with_config(NULL);

	if (base == NULL) {
		event_errx(1, "%s: Unable to construct event_base", __func__);
		return NULL;
	}

	current_base = base;

	return (base);
}

struct event_base *
event_base_new(void)
{
	struct event_base *base = NULL;
	struct event_config *cfg = event_config_new();
	if (cfg) {
		base = event_base_new_with_config(cfg);
		event_config_free(cfg);
	}
	return base;
}
/** Return true iff 'method' is the name of a method that 'cfg' tells us to
 * avoid. */
static int
event_config_is_avoided_method(const struct event_config *cfg,
    const char *method)
{
	struct event_config_entry *entry;

	TAILQ_FOREACH(entry, &cfg->entries, next) {
		if (entry->avoid_method != NULL &&
		    strcmp(entry->avoid_method, method) == 0)
			return (1);
	}

	return (0);
}

/** Return true iff 'method' is disabled according to the environment. */
static int
event_is_method_disabled(const char *name)
{
	char environment[64];
	int i;

	evutil_snprintf(environment, sizeof(environment), "EVENT_NO%s", name);
	for (i = 8; environment[i] != '\0'; ++i)
		environment[i] = EVUTIL_TOUPPER(environment[i]);
	/* Note that evutil_getenv() ignores the environment entirely if
	 * we're setuid */
	return (evutil_getenv(environment) != NULL);
}
int
event_base_get_features(const struct event_base *base)
{
	return base->evsel->features;
}

void
event_deferred_cb_queue_init(struct deferred_cb_queue *cb)
{
	memset(cb, 0, sizeof(struct deferred_cb_queue));
	TAILQ_INIT(&cb->deferred_cb_list);
}

/** Helper for the deferred_cb queue: wake up the event base. */
static void
notify_base_cbq_callback(struct deferred_cb_queue *cb, void *baseptr)
{
	struct event_base *base = baseptr;
	if (EVBASE_NEED_NOTIFY(base))
		evthread_notify_base(base);
}

struct deferred_cb_queue *
event_base_get_deferred_cb_queue(struct event_base *base)
{
	return base ? &base->defer_queue : NULL;
}
void
event_enable_debug_mode(void)
{
#ifndef _EVENT_DISABLE_DEBUG_MODE
	if (_event_debug_mode_on)
		event_errx(1, "%s was called twice!", __func__);
	if (event_debug_mode_too_late)
		event_errx(1, "%s must be called *before* creating any events "
		    "or event_bases", __func__);

	_event_debug_mode_on = 1;

	HT_INIT(event_debug_map, &global_debug_map);
#endif
}

void
event_disable_debug_mode(void)
{
	struct event_debug_entry **ent, *victim;

	EVLOCK_LOCK(_event_debug_map_lock, 0);
	for (ent = HT_START(event_debug_map, &global_debug_map); ent; ) {
		victim = *ent;
		ent = HT_NEXT_RMV(event_debug_map, &global_debug_map, ent);
		mm_free(victim);
	}
	HT_CLEAR(event_debug_map, &global_debug_map);
	EVLOCK_UNLOCK(_event_debug_map_lock, 0);
}
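
/*
 * Illustrative sketch (editor's addition): event_enable_debug_mode() must be
 * the first libevent call in a program, before any event or event_base is
 * created; otherwise the checks above abort.  Assumes <event2/event.h> is
 * included by the caller.
 *
 *	int main(void)
 *	{
 *		event_enable_debug_mode();
 *		struct event_base *base = event_base_new();
 *		event_base_dispatch(base);
 *		event_base_free(base);
 *		return 0;
 *	}
 */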
552 event_base_new_with_config(const struct event_config
*cfg
)
555 struct event_base
*base
;
556 int should_check_environment
;
558 #ifndef _EVENT_DISABLE_DEBUG_MODE
559 event_debug_mode_too_late
= 1;
562 if ((base
= mm_calloc(1, sizeof(struct event_base
))) == NULL
) {
563 event_warn("%s: calloc", __func__
);
567 gettime(base
, &base
->event_tv
);
569 min_heap_ctor(&base
->timeheap
);
570 TAILQ_INIT(&base
->eventqueue
);
571 base
->sig
.ev_signal_pair
[0] = -1;
572 base
->sig
.ev_signal_pair
[1] = -1;
573 base
->th_notify_fd
[0] = -1;
574 base
->th_notify_fd
[1] = -1;
576 event_deferred_cb_queue_init(&base
->defer_queue
);
577 base
->defer_queue
.notify_fn
= notify_base_cbq_callback
;
578 base
->defer_queue
.notify_arg
= base
;
580 base
->flags
= cfg
->flags
;
582 evmap_io_initmap(&base
->io
);
583 evmap_signal_initmap(&base
->sigmap
);
584 event_changelist_init(&base
->changelist
);
588 should_check_environment
=
589 !(cfg
&& (cfg
->flags
& EVENT_BASE_FLAG_IGNORE_ENV
));
591 for (i
= 0; eventops
[i
] && !base
->evbase
; i
++) {
593 /* determine if this backend should be avoided */
594 if (event_config_is_avoided_method(cfg
,
597 if ((eventops
[i
]->features
& cfg
->require_features
)
598 != cfg
->require_features
)
602 /* also obey the environment variables */
603 if (should_check_environment
&&
604 event_is_method_disabled(eventops
[i
]->name
))
607 base
->evsel
= eventops
[i
];
609 base
->evbase
= base
->evsel
->init(base
);
612 if (base
->evbase
== NULL
) {
613 event_warnx("%s: no event mechanism available",
616 event_base_free(base
);
620 if (evutil_getenv("EVENT_SHOW_METHOD"))
621 event_msgx("libevent using: %s", base
->evsel
->name
);
623 /* allocate a single active event queue */
624 if (event_base_priority_init(base
, 1) < 0) {
625 event_base_free(base
);
629 /* prepare for threading */
631 #ifndef _EVENT_DISABLE_THREAD_SUPPORT
632 if (EVTHREAD_LOCKING_ENABLED() &&
633 (!cfg
|| !(cfg
->flags
& EVENT_BASE_FLAG_NOLOCK
))) {
635 EVTHREAD_ALLOC_LOCK(base
->th_base_lock
,
636 EVTHREAD_LOCKTYPE_RECURSIVE
);
637 base
->defer_queue
.lock
= base
->th_base_lock
;
638 EVTHREAD_ALLOC_COND(base
->current_event_cond
);
639 r
= evthread_make_base_notifiable(base
);
641 event_warnx("%s: Unable to make base notifiable.", __func__
);
642 event_base_free(base
);
649 if (cfg
&& (cfg
->flags
& EVENT_BASE_FLAG_STARTUP_IOCP
))
650 event_base_start_iocp(base
, cfg
->n_cpus_hint
);
657 event_base_start_iocp(struct event_base
*base
, int n_cpus
)
662 base
->iocp
= event_iocp_port_launch(n_cpus
);
664 event_warnx("%s: Couldn't launch IOCP", __func__
);
674 event_base_stop_iocp(struct event_base
*base
)
681 rv
= event_iocp_shutdown(base
->iocp
, -1);
682 EVUTIL_ASSERT(rv
>= 0);
688 event_base_free(struct event_base
*base
)
692 /* XXXX grab the lock? If there is contention when one thread frees
693 * the base, then the contending thread will be very sad soon. */
695 /* event_base_free(NULL) is how to free the current_base if we
696 * made it with event_init and forgot to hold a reference to it. */
697 if (base
== NULL
&& current_base
)
699 /* If we're freeing current_base, there won't be a current_base. */
700 if (base
== current_base
)
702 /* Don't actually free NULL. */
704 event_warnx("%s: no base to free", __func__
);
707 /* XXX(niels) - check for internal events first */
710 event_base_stop_iocp(base
);
713 /* threading fds if we have them */
714 if (base
->th_notify_fd
[0] != -1) {
715 event_del(&base
->th_notify
);
716 EVUTIL_CLOSESOCKET(base
->th_notify_fd
[0]);
717 if (base
->th_notify_fd
[1] != -1)
718 EVUTIL_CLOSESOCKET(base
->th_notify_fd
[1]);
719 base
->th_notify_fd
[0] = -1;
720 base
->th_notify_fd
[1] = -1;
721 event_debug_unassign(&base
->th_notify
);
724 /* Delete all non-internal events. */
725 for (ev
= TAILQ_FIRST(&base
->eventqueue
); ev
; ) {
726 struct event
*next
= TAILQ_NEXT(ev
, ev_next
);
727 if (!(ev
->ev_flags
& EVLIST_INTERNAL
)) {
733 while ((ev
= min_heap_top(&base
->timeheap
)) != NULL
) {
737 for (i
= 0; i
< base
->n_common_timeouts
; ++i
) {
738 struct common_timeout_list
*ctl
=
739 base
->common_timeout_queues
[i
];
740 event_del(&ctl
->timeout_event
); /* Internal; doesn't count */
741 event_debug_unassign(&ctl
->timeout_event
);
742 for (ev
= TAILQ_FIRST(&ctl
->events
); ev
; ) {
743 struct event
*next
= TAILQ_NEXT(ev
,
744 ev_timeout_pos
.ev_next_with_common_timeout
);
745 if (!(ev
->ev_flags
& EVLIST_INTERNAL
)) {
753 if (base
->common_timeout_queues
)
754 mm_free(base
->common_timeout_queues
);
756 for (i
= 0; i
< base
->nactivequeues
; ++i
) {
757 for (ev
= TAILQ_FIRST(&base
->activequeues
[i
]); ev
; ) {
758 struct event
*next
= TAILQ_NEXT(ev
, ev_active_next
);
759 if (!(ev
->ev_flags
& EVLIST_INTERNAL
)) {
768 event_debug(("%s: %d events were still set in base",
769 __func__
, n_deleted
));
771 if (base
->evsel
!= NULL
&& base
->evsel
->dealloc
!= NULL
)
772 base
->evsel
->dealloc(base
);
774 for (i
= 0; i
< base
->nactivequeues
; ++i
)
775 EVUTIL_ASSERT(TAILQ_EMPTY(&base
->activequeues
[i
]));
777 EVUTIL_ASSERT(min_heap_empty(&base
->timeheap
));
778 min_heap_dtor(&base
->timeheap
);
780 mm_free(base
->activequeues
);
782 EVUTIL_ASSERT(TAILQ_EMPTY(&base
->eventqueue
));
784 evmap_io_clear(&base
->io
);
785 evmap_signal_clear(&base
->sigmap
);
786 event_changelist_freemem(&base
->changelist
);
788 EVTHREAD_FREE_LOCK(base
->th_base_lock
, EVTHREAD_LOCKTYPE_RECURSIVE
);
789 EVTHREAD_FREE_COND(base
->current_event_cond
);
794 /* reinitialize the event base after a fork */
796 event_reinit(struct event_base
*base
)
798 const struct eventop
*evsel
;
801 int was_notifiable
= 0;
803 EVBASE_ACQUIRE_LOCK(base
, th_base_lock
);
808 /* Right now, reinit always takes effect, since even if the
809 backend doesn't require it, the signal socketpair code does.
813 /* check if this event mechanism requires reinit */
814 if (!evsel
->need_reinit
)
818 /* prevent internal delete */
819 if (base
->sig
.ev_signal_added
) {
820 /* we cannot call event_del here because the base has
821 * not been reinitialized yet. */
822 event_queue_remove(base
, &base
->sig
.ev_signal
,
824 if (base
->sig
.ev_signal
.ev_flags
& EVLIST_ACTIVE
)
825 event_queue_remove(base
, &base
->sig
.ev_signal
,
827 if (base
->sig
.ev_signal_pair
[0] != -1)
828 EVUTIL_CLOSESOCKET(base
->sig
.ev_signal_pair
[0]);
829 if (base
->sig
.ev_signal_pair
[1] != -1)
830 EVUTIL_CLOSESOCKET(base
->sig
.ev_signal_pair
[1]);
831 base
->sig
.ev_signal_added
= 0;
833 if (base
->th_notify_fd
[0] != -1) {
834 /* we cannot call event_del here because the base has
835 * not been reinitialized yet. */
837 event_queue_remove(base
, &base
->th_notify
,
839 if (base
->th_notify
.ev_flags
& EVLIST_ACTIVE
)
840 event_queue_remove(base
, &base
->th_notify
,
842 base
->sig
.ev_signal_added
= 0;
843 EVUTIL_CLOSESOCKET(base
->th_notify_fd
[0]);
844 if (base
->th_notify_fd
[1] != -1)
845 EVUTIL_CLOSESOCKET(base
->th_notify_fd
[1]);
846 base
->th_notify_fd
[0] = -1;
847 base
->th_notify_fd
[1] = -1;
848 event_debug_unassign(&base
->th_notify
);
851 if (base
->evsel
->dealloc
!= NULL
)
852 base
->evsel
->dealloc(base
);
853 base
->evbase
= evsel
->init(base
);
854 if (base
->evbase
== NULL
) {
855 event_errx(1, "%s: could not reinitialize event mechanism",
861 event_changelist_freemem(&base
->changelist
); /* XXX */
862 evmap_io_clear(&base
->io
);
863 evmap_signal_clear(&base
->sigmap
);
865 TAILQ_FOREACH(ev
, &base
->eventqueue
, ev_next
) {
866 if (ev
->ev_events
& (EV_READ
|EV_WRITE
)) {
867 if (ev
== &base
->sig
.ev_signal
) {
868 /* If we run into the ev_signal event, it's only
869 * in eventqueue because some signal event was
870 * added, which made evsig_add re-add ev_signal.
871 * So don't double-add it. */
874 if (evmap_io_add(base
, ev
->ev_fd
, ev
) == -1)
876 } else if (ev
->ev_events
& EV_SIGNAL
) {
877 if (evmap_signal_add(base
, (int)ev
->ev_fd
, ev
) == -1)
882 if (was_notifiable
&& res
== 0)
883 res
= evthread_make_base_notifiable(base
);
886 EVBASE_RELEASE_LOCK(base
, th_base_lock
);
891 event_get_supported_methods(void)
893 static const char **methods
= NULL
;
894 const struct eventop
**method
;
898 /* count all methods */
899 for (method
= &eventops
[0]; *method
!= NULL
; ++method
) {
903 /* allocate one more than we need for the NULL pointer */
904 tmp
= mm_calloc((i
+ 1), sizeof(char *));
908 /* populate the array with the supported methods */
909 for (k
= 0, i
= 0; eventops
[k
] != NULL
; ++k
) {
910 tmp
[i
++] = eventops
[k
]->name
;
915 mm_free((char**)methods
);
922 struct event_config
*
923 event_config_new(void)
925 struct event_config
*cfg
= mm_calloc(1, sizeof(*cfg
));
930 TAILQ_INIT(&cfg
->entries
);
936 event_config_entry_free(struct event_config_entry
*entry
)
938 if (entry
->avoid_method
!= NULL
)
939 mm_free((char *)entry
->avoid_method
);
944 event_config_free(struct event_config
*cfg
)
946 struct event_config_entry
*entry
;
948 while ((entry
= TAILQ_FIRST(&cfg
->entries
)) != NULL
) {
949 TAILQ_REMOVE(&cfg
->entries
, entry
, next
);
950 event_config_entry_free(entry
);
956 event_config_set_flag(struct event_config
*cfg
, int flag
)
965 event_config_avoid_method(struct event_config
*cfg
, const char *method
)
967 struct event_config_entry
*entry
= mm_malloc(sizeof(*entry
));
971 if ((entry
->avoid_method
= mm_strdup(method
)) == NULL
) {
976 TAILQ_INSERT_TAIL(&cfg
->entries
, entry
, next
);
982 event_config_require_features(struct event_config
*cfg
,
987 cfg
->require_features
= features
;
992 event_config_set_num_cpus_hint(struct event_config
*cfg
, int cpus
)
996 cfg
->n_cpus_hint
= cpus
;
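
/*
 * Illustrative sketch (editor's addition): typical use of the config API
 * defined above.  The calls are the libevent 2.0 public API; error handling
 * is abbreviated and the backend/feature choices are only examples.
 *
 *	struct event_config *cfg = event_config_new();
 *	event_config_avoid_method(cfg, "select");
 *	event_config_require_features(cfg, EV_FEATURE_O1);
 *	event_config_set_num_cpus_hint(cfg, 4);
 *	struct event_base *base = event_base_new_with_config(cfg);
 *	event_config_free(cfg);
 */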
1001 event_priority_init(int npriorities
)
1003 return event_base_priority_init(current_base
, npriorities
);
1007 event_base_priority_init(struct event_base
*base
, int npriorities
)
1011 if (N_ACTIVE_CALLBACKS(base
) || npriorities
< 1
1012 || npriorities
>= EVENT_MAX_PRIORITIES
)
1015 if (npriorities
== base
->nactivequeues
)
1018 if (base
->nactivequeues
) {
1019 mm_free(base
->activequeues
);
1020 base
->nactivequeues
= 0;
1023 /* Allocate our priority queues */
1024 base
->activequeues
= (struct event_list
*)
1025 mm_calloc(npriorities
, sizeof(struct event_list
));
1026 if (base
->activequeues
== NULL
) {
1027 event_warn("%s: calloc", __func__
);
1030 base
->nactivequeues
= npriorities
;
1032 for (i
= 0; i
< base
->nactivequeues
; ++i
) {
1033 TAILQ_INIT(&base
->activequeues
[i
]);
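
/*
 * Illustrative sketch (editor's addition): giving a base three priority
 * levels with event_base_priority_init() above and assigning an event to the
 * most urgent one via the public API.  'fd' and 'cb' are hypothetical.
 *
 *	event_base_priority_init(base, 3);
 *	struct event *urgent = event_new(base, fd, EV_READ|EV_PERSIST, cb, NULL);
 *	event_priority_set(urgent, 0);	// 0 is the highest (most urgent) priority
 *	event_add(urgent, NULL);
 */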
1039 /* Returns true iff we're currently watching any events. */
1041 event_haveevents(struct event_base
*base
)
1043 /* Caller must hold th_base_lock */
1044 return (base
->virtual_event_count
> 0 || base
->event_count
> 0);
1047 /* "closure" function called when processing active signal events */
1049 event_signal_closure(struct event_base
*base
, struct event
*ev
)
1054 /* Allows deletes to work */
1055 ncalls
= ev
->ev_ncalls
;
1057 ev
->ev_pncalls
= &ncalls
;
1058 EVBASE_RELEASE_LOCK(base
, th_base_lock
);
1061 ev
->ev_ncalls
= ncalls
;
1063 ev
->ev_pncalls
= NULL
;
1064 (*ev
->ev_callback
)(ev
->ev_fd
, ev
->ev_res
, ev
->ev_arg
);
1066 EVBASE_ACQUIRE_LOCK(base
, th_base_lock
);
1067 should_break
= base
->event_break
;
1068 EVBASE_RELEASE_LOCK(base
, th_base_lock
);
1072 ev
->ev_pncalls
= NULL
;
/* Common timeouts are special timeouts that are handled as queues rather than
 * in the minheap.  This is more efficient than the minheap if we happen to
 * know that we're going to get several thousands of timeout events all with
 * the same timeout value.
 *
 * Since all our timeout handling code assumes timevals can be copied,
 * assigned, etc, we can't use a "magic pointer" to encode these common
 * timeouts.  Searching through a list to see if every timeout is common could
 * also get inefficient.  Instead, we take advantage of the fact that tv_usec
 * is 32 bits long, but only uses 20 of those bits (since it can never be over
 * 999999.)  We use the top bits to encode 4 bits of magic number, and 8 bits
 * of index into the event_base's array of common timeouts.
 */

#define MICROSECONDS_MASK       COMMON_TIMEOUT_MICROSECONDS_MASK
#define COMMON_TIMEOUT_IDX_MASK 0x0ff00000
#define COMMON_TIMEOUT_IDX_SHIFT 20
#define COMMON_TIMEOUT_MASK     0xf0000000
#define COMMON_TIMEOUT_MAGIC    0x50000000

#define COMMON_TIMEOUT_IDX(tv) \
	(((tv)->tv_usec & COMMON_TIMEOUT_IDX_MASK)>>COMMON_TIMEOUT_IDX_SHIFT)
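
/*
 * Worked example (editor's addition), assuming MICROSECONDS_MASK covers the
 * low 20 bits: a common-timeout timeval whose tv_usec is 0x5027a120 decodes as
 *	magic = tv_usec & COMMON_TIMEOUT_MASK     = 0x50000000  (matches MAGIC)
 *	idx   = COMMON_TIMEOUT_IDX(tv)            = 0x02        (3rd queue)
 *	usec  = tv_usec & MICROSECONDS_MASK       = 0x7a120     = 500000 us
 * so the "real" timeout is tv_sec seconds plus 500000 microseconds, queued on
 * base->common_timeout_queues[2].
 */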
/** Return true iff 'tv' is a common timeout in 'base' */
static inline int
is_common_timeout(const struct timeval *tv,
    const struct event_base *base)
{
	int idx;
	if ((tv->tv_usec & COMMON_TIMEOUT_MASK) != COMMON_TIMEOUT_MAGIC)
		return 0;
	idx = COMMON_TIMEOUT_IDX(tv);
	return idx < base->n_common_timeouts;
}

/* True iff tv1 and tv2 have the same common-timeout index, or if neither
 * one is a common timeout. */
static inline int
is_same_common_timeout(const struct timeval *tv1, const struct timeval *tv2)
{
	return (tv1->tv_usec & ~MICROSECONDS_MASK) ==
	    (tv2->tv_usec & ~MICROSECONDS_MASK);
}

/** Requires that 'tv' is a common timeout. Return the corresponding
 * common_timeout_list. */
static inline struct common_timeout_list *
get_common_timeout_list(struct event_base *base, const struct timeval *tv)
{
	return base->common_timeout_queues[COMMON_TIMEOUT_IDX(tv)];
}
1132 common_timeout_ok(const struct timeval
*tv
,
1133 struct event_base
*base
)
1135 const struct timeval
*expect
=
1136 &get_common_timeout_list(base
, tv
)->duration
;
1137 return tv
->tv_sec
== expect
->tv_sec
&&
1138 tv
->tv_usec
== expect
->tv_usec
;
1142 /* Add the timeout for the first event in given common timeout list to the
1143 * event_base's minheap. */
1145 common_timeout_schedule(struct common_timeout_list
*ctl
,
1146 const struct timeval
*now
, struct event
*head
)
1148 struct timeval timeout
= head
->ev_timeout
;
1149 timeout
.tv_usec
&= MICROSECONDS_MASK
;
1150 event_add_internal(&ctl
->timeout_event
, &timeout
, 1);
1153 /* Callback: invoked when the timeout for a common timeout queue triggers.
1154 * This means that (at least) the first event in that queue should be run,
1155 * and the timeout should be rescheduled if there are more events. */
1157 common_timeout_callback(evutil_socket_t fd
, short what
, void *arg
)
1160 struct common_timeout_list
*ctl
= arg
;
1161 struct event_base
*base
= ctl
->base
;
1162 struct event
*ev
= NULL
;
1163 EVBASE_ACQUIRE_LOCK(base
, th_base_lock
);
1164 gettime(base
, &now
);
1166 ev
= TAILQ_FIRST(&ctl
->events
);
1167 if (!ev
|| ev
->ev_timeout
.tv_sec
> now
.tv_sec
||
1168 (ev
->ev_timeout
.tv_sec
== now
.tv_sec
&&
1169 (ev
->ev_timeout
.tv_usec
&MICROSECONDS_MASK
) > now
.tv_usec
))
1171 event_del_internal(ev
);
1172 event_active_nolock(ev
, EV_TIMEOUT
, 1);
1175 common_timeout_schedule(ctl
, &now
, ev
);
1176 EVBASE_RELEASE_LOCK(base
, th_base_lock
);
1179 #define MAX_COMMON_TIMEOUTS 256
1181 const struct timeval
*
1182 event_base_init_common_timeout(struct event_base
*base
,
1183 const struct timeval
*duration
)
1187 const struct timeval
*result
=NULL
;
1188 struct common_timeout_list
*new_ctl
;
1190 EVBASE_ACQUIRE_LOCK(base
, th_base_lock
);
1191 if (duration
->tv_usec
> 1000000) {
1192 memcpy(&tv
, duration
, sizeof(struct timeval
));
1193 if (is_common_timeout(duration
, base
))
1194 tv
.tv_usec
&= MICROSECONDS_MASK
;
1195 tv
.tv_sec
+= tv
.tv_usec
/ 1000000;
1196 tv
.tv_usec
%= 1000000;
1199 for (i
= 0; i
< base
->n_common_timeouts
; ++i
) {
1200 const struct common_timeout_list
*ctl
=
1201 base
->common_timeout_queues
[i
];
1202 if (duration
->tv_sec
== ctl
->duration
.tv_sec
&&
1203 duration
->tv_usec
==
1204 (ctl
->duration
.tv_usec
& MICROSECONDS_MASK
)) {
1205 EVUTIL_ASSERT(is_common_timeout(&ctl
->duration
, base
));
1206 result
= &ctl
->duration
;
1210 if (base
->n_common_timeouts
== MAX_COMMON_TIMEOUTS
) {
1211 event_warnx("%s: Too many common timeouts already in use; "
1212 "we only support %d per event_base", __func__
,
1213 MAX_COMMON_TIMEOUTS
);
1216 if (base
->n_common_timeouts_allocated
== base
->n_common_timeouts
) {
1217 int n
= base
->n_common_timeouts
< 16 ? 16 :
1218 base
->n_common_timeouts
*2;
1219 struct common_timeout_list
**newqueues
=
1220 mm_realloc(base
->common_timeout_queues
,
1221 n
*sizeof(struct common_timeout_queue
*));
1223 event_warn("%s: realloc",__func__
);
1226 base
->n_common_timeouts_allocated
= n
;
1227 base
->common_timeout_queues
= newqueues
;
1229 new_ctl
= mm_calloc(1, sizeof(struct common_timeout_list
));
1231 event_warn("%s: calloc",__func__
);
1234 TAILQ_INIT(&new_ctl
->events
);
1235 new_ctl
->duration
.tv_sec
= duration
->tv_sec
;
1236 new_ctl
->duration
.tv_usec
=
1237 duration
->tv_usec
| COMMON_TIMEOUT_MAGIC
|
1238 (base
->n_common_timeouts
<< COMMON_TIMEOUT_IDX_SHIFT
);
1239 evtimer_assign(&new_ctl
->timeout_event
, base
,
1240 common_timeout_callback
, new_ctl
);
1241 new_ctl
->timeout_event
.ev_flags
|= EVLIST_INTERNAL
;
1242 event_priority_set(&new_ctl
->timeout_event
, 0);
1243 new_ctl
->base
= base
;
1244 base
->common_timeout_queues
[base
->n_common_timeouts
++] = new_ctl
;
1245 result
= &new_ctl
->duration
;
1249 EVUTIL_ASSERT(is_common_timeout(result
, base
));
1251 EVBASE_RELEASE_LOCK(base
, th_base_lock
);
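
/*
 * Illustrative sketch (editor's addition): how an application uses the
 * function above when it plans to schedule many events with one duration.
 * 'timer_event' is a hypothetical, already-assigned timer event.
 *
 *	struct timeval tv = { 10, 0 };	// ten-second duration
 *	const struct timeval *common =
 *	    event_base_init_common_timeout(base, &tv);
 *	// Pass the returned "magic" timeval to event_add(); libevent keeps
 *	// all such events on one ordered queue instead of the minheap.
 *	event_add(timer_event, common);
 */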
1255 /* Closure function invoked when we're activating a persistent event. */
1257 event_persist_closure(struct event_base
*base
, struct event
*ev
)
1259 /* reschedule the persistent event if we have a timeout. */
1260 if (ev
->ev_io_timeout
.tv_sec
|| ev
->ev_io_timeout
.tv_usec
) {
1261 /* If there was a timeout, we want it to run at an interval of
1262 * ev_io_timeout after the last time it was _scheduled_ for,
1263 * not ev_io_timeout after _now_. If it fired for another
1264 * reason, though, the timeout ought to start ticking _now_. */
1265 struct timeval run_at
, relative_to
, delay
, now
;
1266 ev_uint32_t usec_mask
= 0;
1267 EVUTIL_ASSERT(is_same_common_timeout(&ev
->ev_timeout
,
1268 &ev
->ev_io_timeout
));
1269 gettime(base
, &now
);
1270 if (is_common_timeout(&ev
->ev_timeout
, base
)) {
1271 delay
= ev
->ev_io_timeout
;
1272 usec_mask
= delay
.tv_usec
& ~MICROSECONDS_MASK
;
1273 delay
.tv_usec
&= MICROSECONDS_MASK
;
1274 if (ev
->ev_res
& EV_TIMEOUT
) {
1275 relative_to
= ev
->ev_timeout
;
1276 relative_to
.tv_usec
&= MICROSECONDS_MASK
;
1281 delay
= ev
->ev_io_timeout
;
1282 if (ev
->ev_res
& EV_TIMEOUT
) {
1283 relative_to
= ev
->ev_timeout
;
1288 evutil_timeradd(&relative_to
, &delay
, &run_at
);
1289 if (evutil_timercmp(&run_at
, &now
, <)) {
1290 /* Looks like we missed at least one invocation due to
1291 * a clock jump, not running the event loop for a
1292 * while, really slow callbacks, or
1293 * something. Reschedule relative to now.
1295 evutil_timeradd(&now
, &delay
, &run_at
);
1297 run_at
.tv_usec
|= usec_mask
;
1298 event_add_internal(ev
, &run_at
, 1);
1300 EVBASE_RELEASE_LOCK(base
, th_base_lock
);
1301 (*ev
->ev_callback
)(ev
->ev_fd
, ev
->ev_res
, ev
->ev_arg
);
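
/*
 * Worked example (editor's addition): for a persistent event with a 5-second
 * ev_io_timeout that was last scheduled to expire at t=100s, the code above
 * re-adds it for run_at = 105s when it fired because of the timeout
 * (relative_to is the old deadline), but for run_at = now+5s when it fired
 * because the fd became readable or writable (relative_to is now).  If run_at
 * has already passed, e.g. after a clock jump or a slow callback, it is
 * rescheduled relative to now instead.
 */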
1305 Helper for event_process_active to process all the events in a single queue,
1306 releasing the lock as we go. This function requires that the lock be held
1307 when it's invoked. Returns -1 if we get a signal or an event_break that
1308 means we should stop processing any active events now. Otherwise returns
1309 the number of non-internal events that we processed.
1312 event_process_active_single_queue(struct event_base
*base
,
1313 struct event_list
*activeq
)
1318 EVUTIL_ASSERT(activeq
!= NULL
);
1320 for (ev
= TAILQ_FIRST(activeq
); ev
; ev
= TAILQ_FIRST(activeq
)) {
1321 if (ev
->ev_events
& EV_PERSIST
)
1322 event_queue_remove(base
, ev
, EVLIST_ACTIVE
);
1324 event_del_internal(ev
);
1325 if (!(ev
->ev_flags
& EVLIST_INTERNAL
))
1329 "event_process_active: event: %p, %s%scall %p",
1331 ev
->ev_res
& EV_READ
? "EV_READ " : " ",
1332 ev
->ev_res
& EV_WRITE
? "EV_WRITE " : " ",
1335 #ifndef _EVENT_DISABLE_THREAD_SUPPORT
1336 base
->current_event
= ev
;
1337 base
->current_event_waiters
= 0;
1340 switch (ev
->ev_closure
) {
1341 case EV_CLOSURE_SIGNAL
:
1342 event_signal_closure(base
, ev
);
1344 case EV_CLOSURE_PERSIST
:
1345 event_persist_closure(base
, ev
);
1348 case EV_CLOSURE_NONE
:
1349 EVBASE_RELEASE_LOCK(base
, th_base_lock
);
1351 ev
->ev_fd
, ev
->ev_res
, ev
->ev_arg
);
1355 EVBASE_ACQUIRE_LOCK(base
, th_base_lock
);
1356 #ifndef _EVENT_DISABLE_THREAD_SUPPORT
1357 base
->current_event
= NULL
;
1358 if (base
->current_event_waiters
) {
1359 base
->current_event_waiters
= 0;
1360 EVTHREAD_COND_BROADCAST(base
->current_event_cond
);
1364 if (base
->event_break
)
1366 if (base
->event_continue
)
/*
   Process up to MAX_DEFERRED of the deferred_cb entries in 'queue'.  If
   *breakptr becomes set to 1, stop.  Requires that we start out holding
   the lock on 'queue'; releases the lock around 'queue' for each deferred_cb
   we process.
 */
1379 event_process_deferred_callbacks(struct deferred_cb_queue
*queue
, int *breakptr
)
1382 struct deferred_cb
*cb
;
1384 #define MAX_DEFERRED 16
1385 while ((cb
= TAILQ_FIRST(&queue
->deferred_cb_list
))) {
1387 TAILQ_REMOVE(&queue
->deferred_cb_list
, cb
, cb_next
);
1388 --queue
->active_count
;
1389 UNLOCK_DEFERRED_QUEUE(queue
);
1391 cb
->cb(cb
, cb
->arg
);
1393 LOCK_DEFERRED_QUEUE(queue
);
1396 if (++count
== MAX_DEFERRED
)
 * Active events are stored in priority queues.  Lower priorities are always
 * processed before higher priorities.  Low priority events can starve high
1410 event_process_active(struct event_base
*base
)
1412 /* Caller must hold th_base_lock */
1413 struct event_list
*activeq
= NULL
;
1416 for (i
= 0; i
< base
->nactivequeues
; ++i
) {
1417 if (TAILQ_FIRST(&base
->activequeues
[i
]) != NULL
) {
1418 base
->event_running_priority
= i
;
1419 activeq
= &base
->activequeues
[i
];
1420 c
= event_process_active_single_queue(base
, activeq
);
1422 base
->event_running_priority
= -1;
1425 break; /* Processed a real event; do not
1426 * consider lower-priority events */
1427 /* If we get here, all of the events we processed
1428 * were internal. Continue. */
1432 event_process_deferred_callbacks(&base
->defer_queue
,&base
->event_break
);
1433 base
->event_running_priority
= -1;
1438 * Wait continuously for events. We exit only if no events are left.
1442 event_dispatch(void)
1444 return (event_loop(0));
1448 event_base_dispatch(struct event_base
*event_base
)
1450 return (event_base_loop(event_base
, 0));
1454 event_base_get_method(const struct event_base
*base
)
1456 EVUTIL_ASSERT(base
);
1457 return (base
->evsel
->name
);
1460 /** Callback: used to implement event_base_loopexit by telling the event_base
1461 * that it's time to exit its loop. */
1463 event_loopexit_cb(evutil_socket_t fd
, short what
, void *arg
)
1465 struct event_base
*base
= arg
;
1466 base
->event_gotterm
= 1;
1470 event_loopexit(const struct timeval
*tv
)
1472 return (event_once(-1, EV_TIMEOUT
, event_loopexit_cb
,
1477 event_base_loopexit(struct event_base
*event_base
, const struct timeval
*tv
)
1479 return (event_base_once(event_base
, -1, EV_TIMEOUT
, event_loopexit_cb
,
1484 event_loopbreak(void)
1486 return (event_base_loopbreak(current_base
));
1490 event_base_loopbreak(struct event_base
*event_base
)
1493 if (event_base
== NULL
)
1496 EVBASE_ACQUIRE_LOCK(event_base
, th_base_lock
);
1497 event_base
->event_break
= 1;
1499 if (EVBASE_NEED_NOTIFY(event_base
)) {
1500 r
= evthread_notify_base(event_base
);
1504 EVBASE_RELEASE_LOCK(event_base
, th_base_lock
);
1509 event_base_got_break(struct event_base
*event_base
)
1512 EVBASE_ACQUIRE_LOCK(event_base
, th_base_lock
);
1513 res
= event_base
->event_break
;
1514 EVBASE_RELEASE_LOCK(event_base
, th_base_lock
);
1519 event_base_got_exit(struct event_base
*event_base
)
1522 EVBASE_ACQUIRE_LOCK(event_base
, th_base_lock
);
1523 res
= event_base
->event_gotterm
;
1524 EVBASE_RELEASE_LOCK(event_base
, th_base_lock
);
1528 /* not thread safe */
1531 event_loop(int flags
)
1533 return event_base_loop(current_base
, flags
);
1537 event_base_loop(struct event_base
*base
, int flags
)
1539 const struct eventop
*evsel
= base
->evsel
;
1541 struct timeval
*tv_p
;
1542 int res
, done
, retval
= 0;
1544 /* Grab the lock. We will release it inside evsel.dispatch, and again
1545 * as we invoke user callbacks. */
1546 EVBASE_ACQUIRE_LOCK(base
, th_base_lock
);
1548 if (base
->running_loop
) {
1549 event_warnx("%s: reentrant invocation. Only one event_base_loop"
1550 " can run on each event_base at once.", __func__
);
1551 EVBASE_RELEASE_LOCK(base
, th_base_lock
);
1555 base
->running_loop
= 1;
1557 clear_time_cache(base
);
1559 if (base
->sig
.ev_signal_added
&& base
->sig
.ev_n_signals_added
)
1560 evsig_set_base(base
);
1564 #ifndef _EVENT_DISABLE_THREAD_SUPPORT
1565 base
->th_owner_id
= EVTHREAD_GET_ID();
1568 base
->event_gotterm
= base
->event_break
= 0;
1571 base
->event_continue
= 0;
1573 /* Terminate the loop if we have been asked to */
1574 if (base
->event_gotterm
) {
1578 if (base
->event_break
) {
1582 timeout_correct(base
, &tv
);
1585 if (!N_ACTIVE_CALLBACKS(base
) && !(flags
& EVLOOP_NONBLOCK
)) {
1586 timeout_next(base
, &tv_p
);
1589 * if we have active events, we just poll new events
1592 evutil_timerclear(&tv
);
1595 /* If we have no events, we just exit */
1596 if (!event_haveevents(base
) && !N_ACTIVE_CALLBACKS(base
)) {
1597 event_debug(("%s: no events registered.", __func__
));
1602 /* update last old time */
1603 gettime(base
, &base
->event_tv
);
1605 clear_time_cache(base
);
1607 res
= evsel
->dispatch(base
, tv_p
);
1610 event_debug(("%s: dispatch returned unsuccessfully.",
1616 update_time_cache(base
);
1618 timeout_process(base
);
1620 if (N_ACTIVE_CALLBACKS(base
)) {
1621 int n
= event_process_active(base
);
1622 if ((flags
& EVLOOP_ONCE
)
1623 && N_ACTIVE_CALLBACKS(base
) == 0
1626 } else if (flags
& EVLOOP_NONBLOCK
)
1629 event_debug(("%s: asked to terminate loop.", __func__
));
1632 clear_time_cache(base
);
1633 base
->running_loop
= 0;
1635 EVBASE_RELEASE_LOCK(base
, th_base_lock
);
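
/*
 * Illustrative sketch (editor's addition): the usual driver around the loop
 * implemented above.  event_base_dispatch(base) is equivalent to
 * event_base_loop(base, 0); EVLOOP_ONCE and EVLOOP_NONBLOCK change only how
 * long a single call blocks.
 *
 *	struct event_base *base = event_base_new();
 *	// register events here, then:
 *	event_base_dispatch(base);	// runs until no events remain, or until
 *					// event_base_loopexit()/loopbreak()
 *	event_base_free(base);
 */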
1640 /* Sets up an event for processing once */
1644 void (*cb
)(evutil_socket_t
, short, void *);
1648 /* One-time callback to implement event_base_once: invokes the user callback,
1649 * then deletes the allocated storage */
1651 event_once_cb(evutil_socket_t fd
, short events
, void *arg
)
1653 struct event_once
*eonce
= arg
;
1655 (*eonce
->cb
)(fd
, events
, eonce
->arg
);
1656 event_debug_unassign(&eonce
->ev
);
1660 /* not threadsafe, event scheduled once. */
1662 event_once(evutil_socket_t fd
, short events
,
1663 void (*callback
)(evutil_socket_t
, short, void *),
1664 void *arg
, const struct timeval
*tv
)
1666 return event_base_once(current_base
, fd
, events
, callback
, arg
, tv
);
1669 /* Schedules an event once */
1671 event_base_once(struct event_base
*base
, evutil_socket_t fd
, short events
,
1672 void (*callback
)(evutil_socket_t
, short, void *),
1673 void *arg
, const struct timeval
*tv
)
1675 struct event_once
*eonce
;
1679 /* We cannot support signals that just fire once, or persistent
1681 if (events
& (EV_SIGNAL
|EV_PERSIST
))
1684 if ((eonce
= mm_calloc(1, sizeof(struct event_once
))) == NULL
)
1687 eonce
->cb
= callback
;
1690 if (events
== EV_TIMEOUT
) {
1692 evutil_timerclear(&etv
);
1696 evtimer_assign(&eonce
->ev
, base
, event_once_cb
, eonce
);
1697 } else if (events
& (EV_READ
|EV_WRITE
)) {
1698 events
&= EV_READ
|EV_WRITE
;
1700 event_assign(&eonce
->ev
, base
, fd
, events
, event_once_cb
, eonce
);
1702 /* Bad event combination */
1708 res
= event_add(&eonce
->ev
, tv
);
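
/*
 * Illustrative sketch (editor's addition): scheduling a one-shot callback
 * with the function above.  'one_shot_cb' is a hypothetical callback; note
 * that EV_SIGNAL and EV_PERSIST are rejected here.
 *
 *	static void one_shot_cb(evutil_socket_t fd, short what, void *arg)
 *	{
 *		// runs exactly once; the internal event_once storage is
 *		// freed after the callback returns
 *	}
 *
 *	struct timeval half_sec = { 0, 500000 };
 *	event_base_once(base, -1, EV_TIMEOUT, one_shot_cb, NULL, &half_sec);
 */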
1718 event_assign(struct event
*ev
, struct event_base
*base
, evutil_socket_t fd
, short events
, void (*callback
)(evutil_socket_t
, short, void *), void *arg
)
1721 base
= current_base
;
1723 _event_debug_assert_not_added(ev
);
1727 ev
->ev_callback
= callback
;
1730 ev
->ev_events
= events
;
1732 ev
->ev_flags
= EVLIST_INIT
;
1734 ev
->ev_pncalls
= NULL
;
1736 if (events
& EV_SIGNAL
) {
1737 if ((events
& (EV_READ
|EV_WRITE
)) != 0) {
1738 event_warnx("%s: EV_SIGNAL is not compatible with "
1739 "EV_READ or EV_WRITE", __func__
);
1742 ev
->ev_closure
= EV_CLOSURE_SIGNAL
;
1744 if (events
& EV_PERSIST
) {
1745 evutil_timerclear(&ev
->ev_io_timeout
);
1746 ev
->ev_closure
= EV_CLOSURE_PERSIST
;
1748 ev
->ev_closure
= EV_CLOSURE_NONE
;
1752 min_heap_elem_init(ev
);
1755 /* by default, we put new events into the middle priority */
1756 ev
->ev_pri
= base
->nactivequeues
/ 2;
1759 _event_debug_note_setup(ev
);
1765 event_base_set(struct event_base
*base
, struct event
*ev
)
1767 /* Only innocent events may be assigned to a different base */
1768 if (ev
->ev_flags
!= EVLIST_INIT
)
1771 _event_debug_assert_is_setup(ev
);
1774 ev
->ev_pri
= base
->nactivequeues
/2;
1780 event_set(struct event
*ev
, evutil_socket_t fd
, short events
,
1781 void (*callback
)(evutil_socket_t
, short, void *), void *arg
)
1784 r
= event_assign(ev
, current_base
, fd
, events
, callback
, arg
);
1785 EVUTIL_ASSERT(r
== 0);
1789 event_new(struct event_base
*base
, evutil_socket_t fd
, short events
, void (*cb
)(evutil_socket_t
, short, void *), void *arg
)
1792 ev
= mm_malloc(sizeof(struct event
));
1795 if (event_assign(ev
, base
, fd
, events
, cb
, arg
) < 0) {
1804 event_free(struct event
*ev
)
1806 _event_debug_assert_is_setup(ev
);
1808 /* make sure that this event won't be coming back to haunt us. */
1810 _event_debug_note_teardown(ev
);
1816 event_debug_unassign(struct event
*ev
)
1818 _event_debug_assert_not_added(ev
);
1819 _event_debug_note_teardown(ev
);
1821 ev
->ev_flags
&= ~EVLIST_INIT
;
 * Sets the priority of an event - if an event is already scheduled,
 * changing the priority is going to fail.
1830 event_priority_set(struct event
*ev
, int pri
)
1832 _event_debug_assert_is_setup(ev
);
1834 if (ev
->ev_flags
& EVLIST_ACTIVE
)
1836 if (pri
< 0 || pri
>= ev
->ev_base
->nactivequeues
)
1845 * Checks if a specific event is pending or scheduled.
1849 event_pending(const struct event
*ev
, short event
, struct timeval
*tv
)
1853 if (EVUTIL_FAILURE_CHECK(ev
->ev_base
== NULL
)) {
1854 event_warnx("%s: event has no event_base set.", __func__
);
1858 EVBASE_ACQUIRE_LOCK(ev
->ev_base
, th_base_lock
);
1859 _event_debug_assert_is_setup(ev
);
1861 if (ev
->ev_flags
& EVLIST_INSERTED
)
1862 flags
|= (ev
->ev_events
& (EV_READ
|EV_WRITE
|EV_SIGNAL
));
1863 if (ev
->ev_flags
& EVLIST_ACTIVE
)
1864 flags
|= ev
->ev_res
;
1865 if (ev
->ev_flags
& EVLIST_TIMEOUT
)
1866 flags
|= EV_TIMEOUT
;
1868 event
&= (EV_TIMEOUT
|EV_READ
|EV_WRITE
|EV_SIGNAL
);
1870 /* See if there is a timeout that we should report */
1871 if (tv
!= NULL
&& (flags
& event
& EV_TIMEOUT
)) {
1872 struct timeval tmp
= ev
->ev_timeout
;
1873 tmp
.tv_usec
&= MICROSECONDS_MASK
;
1874 #if defined(_EVENT_HAVE_CLOCK_GETTIME) && defined(CLOCK_MONOTONIC)
		/* correctly remap to real time */
1876 evutil_timeradd(&ev
->ev_base
->tv_clock_diff
, &tmp
, tv
);
1882 EVBASE_RELEASE_LOCK(ev
->ev_base
, th_base_lock
);
1884 return (flags
& event
);
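
/*
 * Illustrative sketch (editor's addition): asking whether an event is still
 * scheduled, and if so when its timeout would fire.  'ev' is a hypothetical
 * event that was added with a timeout.
 *
 *	struct timeval deadline;
 *	if (event_pending(ev, EV_READ|EV_TIMEOUT, &deadline)) {
 *		// 'deadline' now holds the absolute expiration time,
 *		// remapped to wall-clock time when the monotonic clock
 *		// is in use.
 *	}
 */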
1888 event_initialized(const struct event
*ev
)
1890 if (!(ev
->ev_flags
& EVLIST_INIT
))
1897 event_get_assignment(const struct event
*event
, struct event_base
**base_out
, evutil_socket_t
*fd_out
, short *events_out
, event_callback_fn
*callback_out
, void **arg_out
)
1899 _event_debug_assert_is_setup(event
);
1902 *base_out
= event
->ev_base
;
1904 *fd_out
= event
->ev_fd
;
1906 *events_out
= event
->ev_events
;
1908 *callback_out
= event
->ev_callback
;
1910 *arg_out
= event
->ev_arg
;
1914 event_get_struct_event_size(void)
1916 return sizeof(struct event
);
1920 event_get_fd(const struct event
*ev
)
1922 _event_debug_assert_is_setup(ev
);
1927 event_get_base(const struct event
*ev
)
1929 _event_debug_assert_is_setup(ev
);
1934 event_get_events(const struct event
*ev
)
1936 _event_debug_assert_is_setup(ev
);
1937 return ev
->ev_events
;
1941 event_get_callback(const struct event
*ev
)
1943 _event_debug_assert_is_setup(ev
);
1944 return ev
->ev_callback
;
1948 event_get_callback_arg(const struct event
*ev
)
1950 _event_debug_assert_is_setup(ev
);
1955 event_add(struct event
*ev
, const struct timeval
*tv
)
1959 if (EVUTIL_FAILURE_CHECK(!ev
->ev_base
)) {
1960 event_warnx("%s: event has no event_base set.", __func__
);
1964 EVBASE_ACQUIRE_LOCK(ev
->ev_base
, th_base_lock
);
1966 res
= event_add_internal(ev
, tv
, 0);
1968 EVBASE_RELEASE_LOCK(ev
->ev_base
, th_base_lock
);
1973 /* Helper callback: wake an event_base from another thread. This version
1974 * works by writing a byte to one end of a socketpair, so that the event_base
1975 * listening on the other end will wake up as the corresponding event
1978 evthread_notify_base_default(struct event_base
*base
)
1984 r
= send(base
->th_notify_fd
[1], buf
, 1, 0);
1986 r
= write(base
->th_notify_fd
[1], buf
, 1);
1988 return (r
< 0 && errno
!= EAGAIN
) ? -1 : 0;
1991 #if defined(_EVENT_HAVE_EVENTFD) && defined(_EVENT_HAVE_SYS_EVENTFD_H)
1992 /* Helper callback: wake an event_base from another thread. This version
1993 * assumes that you have a working eventfd() implementation. */
1995 evthread_notify_base_eventfd(struct event_base
*base
)
1997 ev_uint64_t msg
= 1;
2000 r
= write(base
->th_notify_fd
[0], (void*) &msg
, sizeof(msg
));
2001 } while (r
< 0 && errno
== EAGAIN
);
2003 return (r
< 0) ? -1 : 0;
2007 /** Tell the thread currently running the event_loop for base (if any) that it
2008 * needs to stop waiting in its dispatch function (if it is) and process all
2009 * active events and deferred callbacks (if there are any). */
2011 evthread_notify_base(struct event_base
*base
)
2013 EVENT_BASE_ASSERT_LOCKED(base
);
2014 if (!base
->th_notify_fn
)
2016 if (base
->is_notify_pending
)
2018 base
->is_notify_pending
= 1;
2019 return base
->th_notify_fn(base
);
2022 /* Implementation function to add an event. Works just like event_add,
2023 * except: 1) it requires that we have the lock. 2) if tv_is_absolute is set,
2024 * we treat tv as an absolute time, not as an interval to add to the current
2027 event_add_internal(struct event
*ev
, const struct timeval
*tv
,
2030 struct event_base
*base
= ev
->ev_base
;
2034 EVENT_BASE_ASSERT_LOCKED(base
);
2035 _event_debug_assert_is_setup(ev
);
2038 "event_add: event: %p (fd "EV_SOCK_FMT
"), %s%s%scall %p",
2040 EV_SOCK_ARG(ev
->ev_fd
),
2041 ev
->ev_events
& EV_READ
? "EV_READ " : " ",
2042 ev
->ev_events
& EV_WRITE
? "EV_WRITE " : " ",
2043 tv
? "EV_TIMEOUT " : " ",
2046 EVUTIL_ASSERT(!(ev
->ev_flags
& ~EVLIST_ALL
));
2049 * prepare for timeout insertion further below, if we get a
2050 * failure on any step, we should not change any state.
2052 if (tv
!= NULL
&& !(ev
->ev_flags
& EVLIST_TIMEOUT
)) {
2053 if (min_heap_reserve(&base
->timeheap
,
2054 1 + min_heap_size(&base
->timeheap
)) == -1)
2055 return (-1); /* ENOMEM == errno */
2058 /* If the main thread is currently executing a signal event's
2059 * callback, and we are not the main thread, then we want to wait
2060 * until the callback is done before we mess with the event, or else
2061 * we can race on ev_ncalls and ev_pncalls below. */
2062 #ifndef _EVENT_DISABLE_THREAD_SUPPORT
2063 if (base
->current_event
== ev
&& (ev
->ev_events
& EV_SIGNAL
)
2064 && !EVBASE_IN_THREAD(base
)) {
2065 ++base
->current_event_waiters
;
2066 EVTHREAD_COND_WAIT(base
->current_event_cond
, base
->th_base_lock
);
2070 if ((ev
->ev_events
& (EV_READ
|EV_WRITE
|EV_SIGNAL
)) &&
2071 !(ev
->ev_flags
& (EVLIST_INSERTED
|EVLIST_ACTIVE
))) {
2072 if (ev
->ev_events
& (EV_READ
|EV_WRITE
))
2073 res
= evmap_io_add(base
, ev
->ev_fd
, ev
);
2074 else if (ev
->ev_events
& EV_SIGNAL
)
2075 res
= evmap_signal_add(base
, (int)ev
->ev_fd
, ev
);
2077 event_queue_insert(base
, ev
, EVLIST_INSERTED
);
2079 /* evmap says we need to notify the main thread. */
2086 * we should change the timeout state only if the previous event
2087 * addition succeeded.
2089 if (res
!= -1 && tv
!= NULL
) {
2094 * for persistent timeout events, we remember the
2095 * timeout value and re-add the event.
2097 * If tv_is_absolute, this was already set.
2099 if (ev
->ev_closure
== EV_CLOSURE_PERSIST
&& !tv_is_absolute
)
2100 ev
->ev_io_timeout
= *tv
;
2103 * we already reserved memory above for the case where we
2104 * are not replacing an existing timeout.
2106 if (ev
->ev_flags
& EVLIST_TIMEOUT
) {
2107 /* XXX I believe this is needless. */
2108 if (min_heap_elt_is_top(ev
))
2110 event_queue_remove(base
, ev
, EVLIST_TIMEOUT
);
2113 /* Check if it is active due to a timeout. Rescheduling
2114 * this timeout before the callback can be executed
2115 * removes it from the active list. */
2116 if ((ev
->ev_flags
& EVLIST_ACTIVE
) &&
2117 (ev
->ev_res
& EV_TIMEOUT
)) {
2118 if (ev
->ev_events
& EV_SIGNAL
) {
2119 /* See if we are just active executing
2120 * this event in a loop
2122 if (ev
->ev_ncalls
&& ev
->ev_pncalls
) {
2124 *ev
->ev_pncalls
= 0;
2128 event_queue_remove(base
, ev
, EVLIST_ACTIVE
);
2131 gettime(base
, &now
);
2133 common_timeout
= is_common_timeout(tv
, base
);
2134 if (tv_is_absolute
) {
2135 ev
->ev_timeout
= *tv
;
2136 } else if (common_timeout
) {
2137 struct timeval tmp
= *tv
;
2138 tmp
.tv_usec
&= MICROSECONDS_MASK
;
2139 evutil_timeradd(&now
, &tmp
, &ev
->ev_timeout
);
2140 ev
->ev_timeout
.tv_usec
|=
2141 (tv
->tv_usec
& ~MICROSECONDS_MASK
);
2143 evutil_timeradd(&now
, tv
, &ev
->ev_timeout
);
2147 "event_add: timeout in %d seconds, call %p",
2148 (int)tv
->tv_sec
, ev
->ev_callback
));
2150 event_queue_insert(base
, ev
, EVLIST_TIMEOUT
);
2151 if (common_timeout
) {
2152 struct common_timeout_list
*ctl
=
2153 get_common_timeout_list(base
, &ev
->ev_timeout
);
2154 if (ev
== TAILQ_FIRST(&ctl
->events
)) {
2155 common_timeout_schedule(ctl
, &now
, ev
);
2158 /* See if the earliest timeout is now earlier than it
2159 * was before: if so, we will need to tell the main
2160 * thread to wake up earlier than it would
2162 if (min_heap_elt_is_top(ev
))
2167 /* if we are not in the right thread, we need to wake up the loop */
2168 if (res
!= -1 && notify
&& EVBASE_NEED_NOTIFY(base
))
2169 evthread_notify_base(base
);
2171 _event_debug_note_add(ev
);
2177 event_del(struct event
*ev
)
2181 if (EVUTIL_FAILURE_CHECK(!ev
->ev_base
)) {
2182 event_warnx("%s: event has no event_base set.", __func__
);
2186 EVBASE_ACQUIRE_LOCK(ev
->ev_base
, th_base_lock
);
2188 res
= event_del_internal(ev
);
2190 EVBASE_RELEASE_LOCK(ev
->ev_base
, th_base_lock
);
2195 /* Helper for event_del: always called with th_base_lock held. */
2197 event_del_internal(struct event
*ev
)
2199 struct event_base
*base
;
2200 int res
= 0, notify
= 0;
2202 event_debug(("event_del: %p (fd "EV_SOCK_FMT
"), callback %p",
2203 ev
, EV_SOCK_ARG(ev
->ev_fd
), ev
->ev_callback
));
2205 /* An event without a base has not been added */
2206 if (ev
->ev_base
== NULL
)
2209 EVENT_BASE_ASSERT_LOCKED(ev
->ev_base
);
2211 /* If the main thread is currently executing this event's callback,
2212 * and we are not the main thread, then we want to wait until the
2213 * callback is done before we start removing the event. That way,
2214 * when this function returns, it will be safe to free the
2215 * user-supplied argument. */
2217 #ifndef _EVENT_DISABLE_THREAD_SUPPORT
2218 if (base
->current_event
== ev
&& !EVBASE_IN_THREAD(base
)) {
2219 ++base
->current_event_waiters
;
2220 EVTHREAD_COND_WAIT(base
->current_event_cond
, base
->th_base_lock
);
2224 EVUTIL_ASSERT(!(ev
->ev_flags
& ~EVLIST_ALL
));
2226 /* See if we are just active executing this event in a loop */
2227 if (ev
->ev_events
& EV_SIGNAL
) {
2228 if (ev
->ev_ncalls
&& ev
->ev_pncalls
) {
2230 *ev
->ev_pncalls
= 0;
2234 if (ev
->ev_flags
& EVLIST_TIMEOUT
) {
2235 /* NOTE: We never need to notify the main thread because of a
2236 * deleted timeout event: all that could happen if we don't is
2237 * that the dispatch loop might wake up too early. But the
2238 * point of notifying the main thread _is_ to wake up the
2239 * dispatch loop early anyway, so we wouldn't gain anything by
2242 event_queue_remove(base
, ev
, EVLIST_TIMEOUT
);
2245 if (ev
->ev_flags
& EVLIST_ACTIVE
)
2246 event_queue_remove(base
, ev
, EVLIST_ACTIVE
);
2248 if (ev
->ev_flags
& EVLIST_INSERTED
) {
2249 event_queue_remove(base
, ev
, EVLIST_INSERTED
);
2250 if (ev
->ev_events
& (EV_READ
|EV_WRITE
))
2251 res
= evmap_io_del(base
, ev
->ev_fd
, ev
);
2253 res
= evmap_signal_del(base
, (int)ev
->ev_fd
, ev
);
2255 /* evmap says we need to notify the main thread. */
2261 /* if we are not in the right thread, we need to wake up the loop */
2262 if (res
!= -1 && notify
&& EVBASE_NEED_NOTIFY(base
))
2263 evthread_notify_base(base
);
2265 _event_debug_note_del(ev
);
2271 event_active(struct event
*ev
, int res
, short ncalls
)
2273 if (EVUTIL_FAILURE_CHECK(!ev
->ev_base
)) {
2274 event_warnx("%s: event has no event_base set.", __func__
);
2278 EVBASE_ACQUIRE_LOCK(ev
->ev_base
, th_base_lock
);
2280 _event_debug_assert_is_setup(ev
);
2282 event_active_nolock(ev
, res
, ncalls
);
2284 EVBASE_RELEASE_LOCK(ev
->ev_base
, th_base_lock
);
2289 event_active_nolock(struct event
*ev
, int res
, short ncalls
)
2291 struct event_base
*base
;
2293 event_debug(("event_active: %p (fd "EV_SOCK_FMT
"), res %d, callback %p",
2294 ev
, EV_SOCK_ARG(ev
->ev_fd
), (int)res
, ev
->ev_callback
));
2297 /* We get different kinds of events, add them together */
2298 if (ev
->ev_flags
& EVLIST_ACTIVE
) {
2305 EVENT_BASE_ASSERT_LOCKED(base
);
2309 if (ev
->ev_pri
< base
->event_running_priority
)
2310 base
->event_continue
= 1;
2312 if (ev
->ev_events
& EV_SIGNAL
) {
2313 #ifndef _EVENT_DISABLE_THREAD_SUPPORT
2314 if (base
->current_event
== ev
&& !EVBASE_IN_THREAD(base
)) {
2315 ++base
->current_event_waiters
;
2316 EVTHREAD_COND_WAIT(base
->current_event_cond
, base
->th_base_lock
);
2319 ev
->ev_ncalls
= ncalls
;
2320 ev
->ev_pncalls
= NULL
;
2323 event_queue_insert(base
, ev
, EVLIST_ACTIVE
);
2325 if (EVBASE_NEED_NOTIFY(base
))
2326 evthread_notify_base(base
);
2330 event_deferred_cb_init(struct deferred_cb
*cb
, deferred_cb_fn fn
, void *arg
)
2332 memset(cb
, 0, sizeof(struct deferred_cb
));
2338 event_deferred_cb_cancel(struct deferred_cb_queue
*queue
,
2339 struct deferred_cb
*cb
)
2343 queue
= ¤t_base
->defer_queue
;
2348 LOCK_DEFERRED_QUEUE(queue
);
2350 TAILQ_REMOVE(&queue
->deferred_cb_list
, cb
, cb_next
);
2351 --queue
->active_count
;
2354 UNLOCK_DEFERRED_QUEUE(queue
);
2358 event_deferred_cb_schedule(struct deferred_cb_queue
*queue
,
2359 struct deferred_cb
*cb
)
2363 queue
= ¤t_base
->defer_queue
;
2368 LOCK_DEFERRED_QUEUE(queue
);
2371 TAILQ_INSERT_TAIL(&queue
->deferred_cb_list
, cb
, cb_next
);
2372 ++queue
->active_count
;
2373 if (queue
->notify_fn
)
2374 queue
->notify_fn(queue
, queue
->notify_arg
);
2376 UNLOCK_DEFERRED_QUEUE(queue
);
static int
timeout_next(struct event_base *base, struct timeval **tv_p)
{
	/* Caller must hold th_base_lock */
	struct timeval now;
	struct event *ev;
	struct timeval *tv = *tv_p;
	int res = 0;

	ev = min_heap_top(&base->timeheap);

	if (ev == NULL) {
		/* if no time-based events are active wait for I/O */
		*tv_p = NULL;
		goto out;
	}

	if (gettime(base, &now) == -1) {
		res = -1;
		goto out;
	}

	if (evutil_timercmp(&ev->ev_timeout, &now, <=)) {
		evutil_timerclear(tv);
		goto out;
	}

	evutil_timersub(&ev->ev_timeout, &now, tv);

	EVUTIL_ASSERT(tv->tv_sec >= 0);
	EVUTIL_ASSERT(tv->tv_usec >= 0);
	event_debug(("timeout_next: in %d seconds", (int)tv->tv_sec));

out:
	return (res);
}
/*
 * Determines if the time is running backwards by comparing the current time
 * against the last time we checked.  Not needed when using clock monotonic.
 * If time is running backwards, we adjust the firing time of every event by
 * the amount that time seems to have jumped.
 */
static void
timeout_correct(struct event_base *base, struct timeval *tv)
{
	/* Caller must hold th_base_lock. */
	struct event **pev;
	unsigned int size;
	struct timeval off;
	int i;

	if (use_monotonic)
		return;

	/* Check if time is running backwards */
	gettime(base, tv);

	if (evutil_timercmp(tv, &base->event_tv, >=)) {
		base->event_tv = *tv;
		return;
	}

	event_debug(("%s: time is running backwards, corrected",
		    __func__));
	evutil_timersub(&base->event_tv, tv, &off);

	/*
	 * We can modify the key element of the node without destroying
	 * the minheap property, because we change every element.
	 */
	pev = base->timeheap.p;
	size = base->timeheap.n;
	for (; size-- > 0; ++pev) {
		struct timeval *ev_tv = &(**pev).ev_timeout;
		evutil_timersub(ev_tv, &off, ev_tv);
	}
	for (i=0; i<base->n_common_timeouts; ++i) {
		struct event *ev;
		struct common_timeout_list *ctl =
		    base->common_timeout_queues[i];
		TAILQ_FOREACH(ev, &ctl->events,
		    ev_timeout_pos.ev_next_with_common_timeout) {
			struct timeval *ev_tv = &ev->ev_timeout;
			ev_tv->tv_usec &= MICROSECONDS_MASK;
			evutil_timersub(ev_tv, &off, ev_tv);
			ev_tv->tv_usec |= COMMON_TIMEOUT_MAGIC |
			    (i<<COMMON_TIMEOUT_IDX_SHIFT);
		}
	}

	/* Now remember what the new time turned out to be. */
	base->event_tv = *tv;
}
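/*
 * Worked example (editor's note, numbers chosen for illustration): suppose
 * the last observed wall-clock time (base->event_tv) was 1000.0 s and
 * gettime() now reports 940.0 s.  The comparison above fails, so
 * off = 1000.0 - 940.0 = 60.0 s, and every pending ev_timeout is pulled back
 * by 60 s.  An event that was due at 1005.0 s (5 s away before the jump) is
 * moved to 945.0 s, i.e. still 5 s from the new "now", so relative deadlines
 * are preserved even though absolute time went backwards.
 */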
/* Activate every event whose timeout has elapsed. */
static void
timeout_process(struct event_base *base)
{
	/* Caller must hold lock. */
	struct timeval now;
	struct event *ev;

	if (min_heap_empty(&base->timeheap)) {
		return;
	}

	gettime(base, &now);

	while ((ev = min_heap_top(&base->timeheap))) {
		if (evutil_timercmp(&ev->ev_timeout, &now, >))
			break;

		/* delete this event from the I/O queues */
		event_del_internal(ev);

		event_debug(("timeout_process: call %p",
			 ev->ev_callback));
		event_active_nolock(ev, EV_TIMEOUT, 1);
	}
}
/* Remove 'ev' from 'queue' (EVLIST_...) in base. */
static void
event_queue_remove(struct event_base *base, struct event *ev, int queue)
{
	EVENT_BASE_ASSERT_LOCKED(base);

	if (!(ev->ev_flags & queue)) {
		event_errx(1, "%s: %p(fd "EV_SOCK_FMT") not on queue %x", __func__,
			   ev, EV_SOCK_ARG(ev->ev_fd), queue);
		return;
	}

	if (~ev->ev_flags & EVLIST_INTERNAL)
		base->event_count--;

	ev->ev_flags &= ~queue;
	switch (queue) {
	case EVLIST_INSERTED:
		TAILQ_REMOVE(&base->eventqueue, ev, ev_next);
		break;
	case EVLIST_ACTIVE:
		base->event_count_active--;
		TAILQ_REMOVE(&base->activequeues[ev->ev_pri],
		    ev, ev_active_next);
		break;
	case EVLIST_TIMEOUT:
		if (is_common_timeout(&ev->ev_timeout, base)) {
			struct common_timeout_list *ctl =
			    get_common_timeout_list(base, &ev->ev_timeout);
			TAILQ_REMOVE(&ctl->events, ev,
			    ev_timeout_pos.ev_next_with_common_timeout);
		} else {
			min_heap_erase(&base->timeheap, ev);
		}
		break;
	default:
		event_errx(1, "%s: unknown queue %x", __func__, queue);
	}
}
/* Add 'ev' to the common timeout list in 'ctl'. */
static void
insert_common_timeout_inorder(struct common_timeout_list *ctl,
    struct event *ev)
{
	struct event *e;
	/* By all logic, we should just be able to append 'ev' to the end of
	 * ctl->events, since the timeout on each 'ev' is set to {the common
	 * timeout} + {the time when we add the event}, and so the events
	 * should arrive in order of their timeouts.  But just in case
	 * there's some wacky threading issue going on, we do a search from
	 * the end of the list to find the right insertion point.
	 */
	TAILQ_FOREACH_REVERSE(e, &ctl->events,
	    event_list, ev_timeout_pos.ev_next_with_common_timeout) {
		/* This timercmp is a little sneaky, since both ev and e have
		 * magic values in tv_usec.  Fortunately, they ought to have
		 * the _same_ magic values in tv_usec.  Let's assert for that.
		 */
		EVUTIL_ASSERT(
			is_same_common_timeout(&e->ev_timeout, &ev->ev_timeout));
		if (evutil_timercmp(&ev->ev_timeout, &e->ev_timeout, >=)) {
			TAILQ_INSERT_AFTER(&ctl->events, e, ev,
			    ev_timeout_pos.ev_next_with_common_timeout);
			return;
		}
	}
	TAILQ_INSERT_HEAD(&ctl->events, ev,
	    ev_timeout_pos.ev_next_with_common_timeout);
}
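/*
 * Editor's note on the tv_usec encoding used above (a sketch based on the
 * macros referenced in this file): for an event on a common timeout, the
 * microsecond field carries extra state in its high bits, roughly
 *
 *	tv_usec = COMMON_TIMEOUT_MAGIC
 *	    | (queue_index << COMMON_TIMEOUT_IDX_SHIFT)
 *	    | (real_microseconds & MICROSECONDS_MASK);
 *
 * which is why timeout_correct() strips the mask before doing timeval
 * arithmetic and then re-applies the magic and index bits, and why two
 * events on the same list are expected to share the same magic bits.
 */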
static void
event_queue_insert(struct event_base *base, struct event *ev, int queue)
{
	EVENT_BASE_ASSERT_LOCKED(base);

	if (ev->ev_flags & queue) {
		/* Double insertion is possible for active events */
		if (queue & EVLIST_ACTIVE)
			return;

		event_errx(1, "%s: %p(fd "EV_SOCK_FMT") already on queue %x", __func__,
			   ev, EV_SOCK_ARG(ev->ev_fd), queue);
		return;
	}

	if (~ev->ev_flags & EVLIST_INTERNAL)
		base->event_count++;

	ev->ev_flags |= queue;
	switch (queue) {
	case EVLIST_INSERTED:
		TAILQ_INSERT_TAIL(&base->eventqueue, ev, ev_next);
		break;
	case EVLIST_ACTIVE:
		base->event_count_active++;
		TAILQ_INSERT_TAIL(&base->activequeues[ev->ev_pri],
		    ev, ev_active_next);
		break;
	case EVLIST_TIMEOUT: {
		if (is_common_timeout(&ev->ev_timeout, base)) {
			struct common_timeout_list *ctl =
			    get_common_timeout_list(base, &ev->ev_timeout);
			insert_common_timeout_inorder(ctl, ev);
		} else
			min_heap_push(&base->timeheap, ev);
		break;
	}
	default:
		event_errx(1, "%s: unknown queue %x", __func__, queue);
	}
}
/* Functions for debugging */

const char *
event_get_version(void)
{
	return (_EVENT_VERSION);
}

ev_uint32_t
event_get_version_number(void)
{
	return (_EVENT_NUMERIC_VERSION);
}

/*
 * No thread-safe interface needed - the information should be the same
 * for all threads.
 */

const char *
event_get_method(void)
{
	return (current_base->evsel->name);
}
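/*
 * Illustrative usage (editor's sketch): report the version that was linked
 * in and the backend selected for a base.  event_base_get_method() is the
 * per-base, thread-safe counterpart of event_get_method().
 *
 *	struct event_base *base = event_base_new();
 *	printf("libevent %s (0x%08x), backend: %s\n",
 *	    event_get_version(),
 *	    (unsigned)event_get_version_number(),
 *	    event_base_get_method(base));
 */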
#ifndef _EVENT_DISABLE_MM_REPLACEMENT
static void *(*_mm_malloc_fn)(size_t sz) = NULL;
static void *(*_mm_realloc_fn)(void *p, size_t sz) = NULL;
static void (*_mm_free_fn)(void *p) = NULL;

void *
event_mm_malloc_(size_t sz)
{
	if (_mm_malloc_fn)
		return _mm_malloc_fn(sz);
	else
		return malloc(sz);
}

void *
event_mm_calloc_(size_t count, size_t size)
{
	if (_mm_malloc_fn) {
		size_t sz = count * size;
		void *p = _mm_malloc_fn(sz);
		if (p)
			memset(p, 0, sz);
		return p;
	} else
		return calloc(count, size);
}

char *
event_mm_strdup_(const char *str)
{
	if (_mm_malloc_fn) {
		size_t ln = strlen(str);
		void *p = _mm_malloc_fn(ln+1);
		if (p)
			memcpy(p, str, ln+1);
		return p;
	} else
#ifdef WIN32
		return _strdup(str);
#else
		return strdup(str);
#endif
}

void *
event_mm_realloc_(void *ptr, size_t sz)
{
	if (_mm_realloc_fn)
		return _mm_realloc_fn(ptr, sz);
	else
		return realloc(ptr, sz);
}

void
event_mm_free_(void *ptr)
{
	if (_mm_free_fn)
		_mm_free_fn(ptr);
	else
		free(ptr);
}

void
event_set_mem_functions(void *(*malloc_fn)(size_t sz),
			void *(*realloc_fn)(void *ptr, size_t sz),
			void (*free_fn)(void *ptr))
{
	_mm_malloc_fn = malloc_fn;
	_mm_realloc_fn = realloc_fn;
	_mm_free_fn = free_fn;
}
#endif
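/*
 * Illustrative usage (editor's sketch): routing every libevent allocation
 * through counting wrappers.  This must happen before any other libevent
 * call allocates memory; the wrapper names are hypothetical.
 *
 *	static size_t total_allocs;
 *
 *	static void *counting_malloc(size_t sz)
 *	{
 *		++total_allocs;
 *		return malloc(sz);
 *	}
 *	static void *counting_realloc(void *p, size_t sz)
 *	{
 *		return realloc(p, sz);
 *	}
 *	static void counting_free(void *p)
 *	{
 *		free(p);
 *	}
 *
 *	event_set_mem_functions(counting_malloc, counting_realloc, counting_free);
 */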
#if defined(_EVENT_HAVE_EVENTFD) && defined(_EVENT_HAVE_SYS_EVENTFD_H)
static void
evthread_notify_drain_eventfd(evutil_socket_t fd, short what, void *arg)
{
	ev_uint64_t msg;
	ev_ssize_t r;
	struct event_base *base = arg;

	r = read(fd, (void*) &msg, sizeof(msg));
	if (r<0 && errno != EAGAIN) {
		event_sock_warn(fd, "Error reading from eventfd");
	}
	EVBASE_ACQUIRE_LOCK(base, th_base_lock);
	base->is_notify_pending = 0;
	EVBASE_RELEASE_LOCK(base, th_base_lock);
}
#endif

static void
evthread_notify_drain_default(evutil_socket_t fd, short what, void *arg)
{
	unsigned char buf[1024];
	struct event_base *base = arg;
#ifdef WIN32
	while (recv(fd, (char*)buf, sizeof(buf), 0) > 0)
		;
#else
	while (read(fd, (char*)buf, sizeof(buf)) > 0)
		;
#endif

	EVBASE_ACQUIRE_LOCK(base, th_base_lock);
	base->is_notify_pending = 0;
	EVBASE_RELEASE_LOCK(base, th_base_lock);
}
int
evthread_make_base_notifiable(struct event_base *base)
{
	void (*cb)(evutil_socket_t, short, void *) = evthread_notify_drain_default;
	int (*notify)(struct event_base *) = evthread_notify_base_default;

	/* XXXX grab the lock here? */
	if (!base)
		return -1;

	if (base->th_notify_fd[0] >= 0)
		return 0;

#if defined(_EVENT_HAVE_EVENTFD) && defined(_EVENT_HAVE_SYS_EVENTFD_H)
#ifndef EFD_CLOEXEC
#define EFD_CLOEXEC 0
#endif
	base->th_notify_fd[0] = eventfd(0, EFD_CLOEXEC);
	if (base->th_notify_fd[0] >= 0) {
		evutil_make_socket_closeonexec(base->th_notify_fd[0]);
		notify = evthread_notify_base_eventfd;
		cb = evthread_notify_drain_eventfd;
	}
#endif
#if defined(_EVENT_HAVE_PIPE)
	if (base->th_notify_fd[0] < 0) {
		if ((base->evsel->features & EV_FEATURE_FDS)) {
			if (pipe(base->th_notify_fd) < 0) {
				event_warn("%s: pipe", __func__);
			} else {
				evutil_make_socket_closeonexec(base->th_notify_fd[0]);
				evutil_make_socket_closeonexec(base->th_notify_fd[1]);
			}
		}
	}
#endif

#ifdef WIN32
#define LOCAL_SOCKETPAIR_AF AF_INET
#else
#define LOCAL_SOCKETPAIR_AF AF_UNIX
#endif
	if (base->th_notify_fd[0] < 0) {
		if (evutil_socketpair(LOCAL_SOCKETPAIR_AF, SOCK_STREAM, 0,
			base->th_notify_fd) == -1) {
			event_sock_warn(-1, "%s: socketpair", __func__);
			return (-1);
		} else {
			evutil_make_socket_closeonexec(base->th_notify_fd[0]);
			evutil_make_socket_closeonexec(base->th_notify_fd[1]);
		}
	}

	evutil_make_socket_nonblocking(base->th_notify_fd[0]);

	base->th_notify_fn = notify;

	/*
	  Making the second socket nonblocking is a bit subtle, given that we
	  ignore any EAGAIN returns when writing to it, and you don't usually
	  do that for a nonblocking socket. But if the kernel gives us EAGAIN,
	  then there's no need to add any more data to the buffer, since
	  the main thread is already either about to wake up and drain it,
	  or woken up and in the process of draining it.
	*/
	if (base->th_notify_fd[1] > 0)
		evutil_make_socket_nonblocking(base->th_notify_fd[1]);

	/* prepare an event that we can use for wakeup */
	event_assign(&base->th_notify, base, base->th_notify_fd[0],
	    EV_READ|EV_PERSIST, cb, base);

	/* we need to mark this as internal event */
	base->th_notify.ev_flags |= EVLIST_INTERNAL;
	event_priority_set(&base->th_notify, 0);

	return event_add(&base->th_notify, NULL);
}
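/*
 * Editor's note (illustrative, not part of libevent): with the base made
 * notifiable, a worker thread can safely manipulate events and the loop
 * thread is woken through the th_notify descriptor pair set up above.
 * Assumes evthread_use_pthreads() was called before creating the base;
 * 'user_ev' is a hypothetical event owned by the application.
 *
 *	// in a worker thread:
 *	event_active(user_ev, 0, 0);
 *	// event_active_nolock() sees EVBASE_NEED_NOTIFY() and calls
 *	// evthread_notify_base(), which writes to th_notify_fd[1]; the
 *	// th_notify read event then drains it in the loop thread.
 */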
void
event_base_dump_events(struct event_base *base, FILE *output)
{
	struct event *e;
	int i;
	fprintf(output, "Inserted events:\n");
	TAILQ_FOREACH(e, &base->eventqueue, ev_next) {
		fprintf(output, "  %p [fd "EV_SOCK_FMT"]%s%s%s%s%s\n",
				(void*)e, EV_SOCK_ARG(e->ev_fd),
				(e->ev_events&EV_READ)?" Read":"",
				(e->ev_events&EV_WRITE)?" Write":"",
				(e->ev_events&EV_SIGNAL)?" Signal":"",
				(e->ev_events&EV_TIMEOUT)?" Timeout":"",
				(e->ev_events&EV_PERSIST)?" Persist":"");
	}
	for (i = 0; i < base->nactivequeues; ++i) {
		if (TAILQ_EMPTY(&base->activequeues[i]))
			continue;
		fprintf(output, "Active events [priority %d]:\n", i);
		TAILQ_FOREACH(e, &base->eventqueue, ev_next) {
			fprintf(output, "  %p [fd "EV_SOCK_FMT"]%s%s%s%s\n",
					(void*)e, EV_SOCK_ARG(e->ev_fd),
					(e->ev_res&EV_READ)?" Read active":"",
					(e->ev_res&EV_WRITE)?" Write active":"",
					(e->ev_res&EV_SIGNAL)?" Signal active":"",
					(e->ev_res&EV_TIMEOUT)?" Timeout active":"");
		}
	}
}
void
event_base_add_virtual(struct event_base *base)
{
	EVBASE_ACQUIRE_LOCK(base, th_base_lock);
	base->virtual_event_count++;
	EVBASE_RELEASE_LOCK(base, th_base_lock);
}

void
event_base_del_virtual(struct event_base *base)
{
	EVBASE_ACQUIRE_LOCK(base, th_base_lock);
	EVUTIL_ASSERT(base->virtual_event_count > 0);
	base->virtual_event_count--;
	if (base->virtual_event_count == 0 && EVBASE_NEED_NOTIFY(base))
		evthread_notify_base(base);
	EVBASE_RELEASE_LOCK(base, th_base_lock);
}
#ifndef _EVENT_DISABLE_THREAD_SUPPORT
int
event_global_setup_locks_(const int enable_locks)
{
#ifndef _EVENT_DISABLE_DEBUG_MODE
	EVTHREAD_SETUP_GLOBAL_LOCK(_event_debug_map_lock, 0);
#endif
	if (evsig_global_setup_locks_(enable_locks) < 0)
		return -1;
	if (evutil_secure_rng_global_setup_locks_(enable_locks) < 0)
		return -1;
	return 0;
}
#endif
void
event_base_assert_ok(struct event_base *base)
{
	int i;
	EVBASE_ACQUIRE_LOCK(base, th_base_lock);
	evmap_check_integrity(base);

	/* Check the heap property */
	for (i = 1; i < (int)base->timeheap.n; ++i) {
		int parent = (i - 1) / 2;
		struct event *ev, *p_ev;
		ev = base->timeheap.p[i];
		p_ev = base->timeheap.p[parent];
		EVUTIL_ASSERT(ev->ev_flags & EV_TIMEOUT);
		EVUTIL_ASSERT(evutil_timercmp(&p_ev->ev_timeout, &ev->ev_timeout, <=));
		EVUTIL_ASSERT(ev->ev_timeout_pos.min_heap_idx == i);
	}

	/* Check that the common timeouts are fine */
	for (i = 0; i < base->n_common_timeouts; ++i) {
		struct common_timeout_list *ctl = base->common_timeout_queues[i];
		struct event *last=NULL, *ev;
		TAILQ_FOREACH(ev, &ctl->events, ev_timeout_pos.ev_next_with_common_timeout) {
			if (last)
				EVUTIL_ASSERT(evutil_timercmp(&last->ev_timeout, &ev->ev_timeout, <=));
			EVUTIL_ASSERT(ev->ev_flags & EV_TIMEOUT);
			EVUTIL_ASSERT(is_common_timeout(&ev->ev_timeout,base));
			EVUTIL_ASSERT(COMMON_TIMEOUT_IDX(&ev->ev_timeout) == i);
			last = ev;
		}
	}

	EVBASE_RELEASE_LOCK(base, th_base_lock);
}
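/*
 * Illustrative usage (editor's sketch): call this from debug builds after a
 * burst of event_add()/event_del() calls to verify the timeout heap and the
 * common-timeout lists; it asserts (and aborts) if an invariant is violated.
 * MY_APP_DEBUG is a hypothetical application-level macro.
 *
 *	#ifdef MY_APP_DEBUG
 *	event_base_assert_ok(base);
 *	#endif
 */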