1 /* $NetBSD: evmap.c,v 1.2 2013/04/11 16:56:41 christos Exp $ */
3 * Copyright (c) 2007-2012 Niels Provos and Nick Mathewson
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
13 * 3. The name of the author may not be used to endorse or promote products
14 * derived from this software without specific prior written permission.
16 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
17 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
18 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
19 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
20 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
21 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
22 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
23 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
24 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
25 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 #include "event2/event-config.h"
28 #include <sys/cdefs.h>
29 __RCSID("$NetBSD: evmap.c,v 1.2 2013/04/11 16:56:41 christos Exp $");
33 #define WIN32_LEAN_AND_MEAN
35 #undef WIN32_LEAN_AND_MEAN
37 #include <sys/types.h>
38 #if !defined(WIN32) && defined(_EVENT_HAVE_SYS_TIME_H)
41 #include <sys/queue.h>
52 #include "event-internal.h"
53 #include "evmap-internal.h"
54 #include "mm-internal.h"
55 #include "changelist-internal.h"
57 /** An entry for an evmap_io list: notes all the events that want to read or
58 write on a given fd, and the number of each.
61 struct event_list events
;
66 /* An entry for an evmap_signal list: notes all the events that want to know
67 when a signal triggers. */
69 struct event_list events
;
72 /* On some platforms, fds start at 0 and increment by 1 as they are
73 allocated, and old numbers get used. For these platforms, we
74 implement io maps just like signal maps: as an array of pointers to
75 struct evmap_io. But on other platforms (windows), sockets are not
76 0-indexed, not necessarily consecutive, and not necessarily reused.
77 There, we use a hashtable to implement evmap_io.
80 struct event_map_entry
{
81 HT_ENTRY(event_map_entry
) map_node
;
83 union { /* This is a union in case we need to make more things that can
84 be in the hashtable. */
85 struct evmap_io evmap_io
;
89 /* Helper used by the event_io_map hashtable code; tries to return a good hash
90 * of the fd in e->fd. */
91 static inline unsigned
92 hashsocket(struct event_map_entry
*e
)
94 /* On win32, in practice, the low 2-3 bits of a SOCKET seem not to
95 * matter. Our hashtable implementation really likes low-order bits,
96 * though, so let's do the rotate-and-add trick. */
97 unsigned h
= (unsigned) e
->fd
;
98 h
+= (h
>> 2) | (h
<< 30);
102 /* Helper used by the event_io_map hashtable code; returns true iff e1 and e2
103 * have the same e->fd. */
105 eqsocket(struct event_map_entry
*e1
, struct event_map_entry
*e2
)
107 return e1
->fd
== e2
->fd
;
110 HT_PROTOTYPE(event_io_map
, event_map_entry
, map_node
, hashsocket
, eqsocket
)
111 HT_GENERATE(event_io_map
, event_map_entry
, map_node
, hashsocket
, eqsocket
,
112 0.5, mm_malloc
, mm_realloc
, mm_free
)
114 #define GET_IO_SLOT(x, map, slot, type) \
116 struct event_map_entry _key, *_ent; \
118 _ent = HT_FIND(event_io_map, map, &_key); \
119 (x) = _ent ? &_ent->ent.type : NULL; \
120 } while (/*CONSTCOND*/0);
122 #define GET_IO_SLOT_AND_CTOR(x, map, slot, type, ctor, fdinfo_len) \
124 struct event_map_entry _key, *_ent; \
126 _HT_FIND_OR_INSERT(event_io_map, map_node, hashsocket, map, \
127 event_map_entry, &_key, ptr, \
132 _ent = mm_calloc(1,sizeof(struct event_map_entry)+fdinfo_len); \
133 if (EVUTIL_UNLIKELY(_ent == NULL)) \
136 (ctor)(&_ent->ent.type); \
137 _HT_FOI_INSERT(map_node, map, &_key, _ent, ptr) \
139 (x) = &_ent->ent.type; \
140 } while (/*CONSTCOND*/0)
142 void evmap_io_initmap(struct event_io_map
*ctx
)
144 HT_INIT(event_io_map
, ctx
);
147 void evmap_io_clear(struct event_io_map
*ctx
)
149 struct event_map_entry
**ent
, **next
, *this;
150 for (ent
= HT_START(event_io_map
, ctx
); ent
; ent
= next
) {
152 next
= HT_NEXT_RMV(event_io_map
, ctx
, ent
);
155 HT_CLEAR(event_io_map
, ctx
); /* remove all storage held by the ctx. */
159 /* Set the variable 'x' to the field in event_map 'map' with fields of type
160 'struct type *' corresponding to the fd or signal 'slot'. Set 'x' to NULL
161 if there are no entries for 'slot'. Does no bounds-checking. */
162 #define GET_SIGNAL_SLOT(x, map, slot, type) \
163 (x) = (struct type *)((map)->entries[slot])
164 /* As GET_SLOT, but construct the entry for 'slot' if it is not present,
165 by allocating enough memory for a 'struct type', and initializing the new
166 value by calling the function 'ctor' on it. Makes the function
167 return -1 on allocation failure.
169 #define GET_SIGNAL_SLOT_AND_CTOR(x, map, slot, type, ctor, fdinfo_len) \
171 if ((map)->entries[slot] == NULL) { \
172 (map)->entries[slot] = \
173 mm_calloc(1,sizeof(struct type)+fdinfo_len); \
174 if (EVUTIL_UNLIKELY((map)->entries[slot] == NULL)) \
176 (ctor)((struct type *)(map)->entries[slot]); \
178 (x) = (struct type *)((map)->entries[slot]); \
179 } while (/*CONSTCOND*/0)
181 /* If we aren't using hashtables, then define the IO_SLOT macros and functions
182 as thin aliases over the SIGNAL_SLOT versions. */
184 #define GET_IO_SLOT(x,map,slot,type) GET_SIGNAL_SLOT(x,map,slot,type)
185 #define GET_IO_SLOT_AND_CTOR(x,map,slot,type,ctor,fdinfo_len) \
186 GET_SIGNAL_SLOT_AND_CTOR(x,map,slot,type,ctor,fdinfo_len)
187 #define FDINFO_OFFSET sizeof(struct evmap_io)
/* Without hashtables, an io map is laid out exactly like a signal map, so
 * initialization is a thin alias.
 * NOTE(review): the `void` return-type line and braces were dropped by
 * extraction; restored. */
void
evmap_io_initmap(struct event_io_map* ctx)
{
	evmap_signal_initmap(ctx);
}
/* Without hashtables, clearing an io map is the same as clearing a signal
 * map (see the alias comment above the SLOT macros).
 * NOTE(review): the `void` return-type line and braces were dropped by
 * extraction; restored. */
void
evmap_io_clear(struct event_io_map* ctx)
{
	evmap_signal_clear(ctx);
}
201 /** Expand 'map' with new entries of width 'msize' until it is big enough
202 to store a value in 'slot'.
205 evmap_make_space(struct event_signal_map
*map
, int slot
, int msize
)
207 if (map
->nentries
<= slot
) {
208 int nentries
= map
->nentries
? map
->nentries
: 32;
211 while (nentries
<= slot
)
214 tmp
= (void **)mm_realloc(map
->entries
, nentries
* msize
);
218 memset(&tmp
[map
->nentries
], 0,
219 (nentries
- map
->nentries
) * msize
);
221 map
->nentries
= nentries
;
229 evmap_signal_initmap(struct event_signal_map
*ctx
)
236 evmap_signal_clear(struct event_signal_map
*ctx
)
238 if (ctx
->entries
!= NULL
) {
240 for (i
= 0; i
< ctx
->nentries
; ++i
) {
241 if (ctx
->entries
[i
] != NULL
)
242 mm_free(ctx
->entries
[i
]);
244 mm_free(ctx
->entries
);
251 /* code specific to file descriptors */
253 /** Constructor for struct evmap_io */
255 evmap_io_init(struct evmap_io
*entry
)
257 TAILQ_INIT(&entry
->events
);
263 /* return -1 on error, 0 on success if nothing changed in the event backend,
264 * and 1 on success if something did. */
266 evmap_io_add(struct event_base
*base
, evutil_socket_t fd
, struct event
*ev
)
268 const struct eventop
*evsel
= base
->evsel
;
269 struct event_io_map
*io
= &base
->io
;
270 struct evmap_io
*ctx
= NULL
;
271 int nread
, nwrite
, retval
= 0;
272 short res
= 0, old
= 0;
273 struct event
*old_ev
;
275 EVUTIL_ASSERT(fd
== ev
->ev_fd
);
281 if (fd
>= io
->nentries
) {
282 if (evmap_make_space(io
, fd
, sizeof(struct evmap_io
*)) == -1)
286 GET_IO_SLOT_AND_CTOR(ctx
, io
, fd
, evmap_io
, evmap_io_init
,
290 nwrite
= ctx
->nwrite
;
297 if (ev
->ev_events
& EV_READ
) {
301 if (ev
->ev_events
& EV_WRITE
) {
305 if (EVUTIL_UNLIKELY(nread
> 0xffff || nwrite
> 0xffff)) {
306 event_warnx("Too many events reading or writing on fd %d",
310 if (EVENT_DEBUG_MODE_IS_ON() &&
311 (old_ev
= TAILQ_FIRST(&ctx
->events
)) &&
312 (old_ev
->ev_events
&EV_ET
) != (ev
->ev_events
&EV_ET
)) {
313 event_warnx("Tried to mix edge-triggered and non-edge-triggered"
314 " events on fd %d", (int)fd
);
319 void *extra
= ((char*)ctx
) + sizeof(struct evmap_io
);
320 /* XXX(niels): we cannot mix edge-triggered and
321 * level-triggered, we should probably assert on
323 if (evsel
->add(base
, ev
->ev_fd
,
324 old
, (ev
->ev_events
& EV_ET
) | res
, extra
) == -1)
329 ctx
->nread
= (ev_uint16_t
) nread
;
330 ctx
->nwrite
= (ev_uint16_t
) nwrite
;
331 TAILQ_INSERT_TAIL(&ctx
->events
, ev
, ev_io_next
);
336 /* return -1 on error, 0 on success if nothing changed in the event backend,
337 * and 1 on success if something did. */
339 evmap_io_del(struct event_base
*base
, evutil_socket_t fd
, struct event
*ev
)
341 const struct eventop
*evsel
= base
->evsel
;
342 struct event_io_map
*io
= &base
->io
;
343 struct evmap_io
*ctx
;
344 int nread
, nwrite
, retval
= 0;
345 short res
= 0, old
= 0;
350 EVUTIL_ASSERT(fd
== ev
->ev_fd
);
353 if (fd
>= io
->nentries
)
357 GET_IO_SLOT(ctx
, io
, fd
, evmap_io
);
360 nwrite
= ctx
->nwrite
;
367 if (ev
->ev_events
& EV_READ
) {
370 EVUTIL_ASSERT(nread
>= 0);
372 if (ev
->ev_events
& EV_WRITE
) {
375 EVUTIL_ASSERT(nwrite
>= 0);
379 void *extra
= ((char*)ctx
) + sizeof(struct evmap_io
);
380 if (evsel
->del(base
, ev
->ev_fd
, old
, res
, extra
) == -1)
386 ctx
->nwrite
= nwrite
;
387 TAILQ_REMOVE(&ctx
->events
, ev
, ev_io_next
);
393 evmap_io_active(struct event_base
*base
, evutil_socket_t fd
, short events
)
395 struct event_io_map
*io
= &base
->io
;
396 struct evmap_io
*ctx
;
400 EVUTIL_ASSERT(fd
< io
->nentries
);
402 GET_IO_SLOT(ctx
, io
, fd
, evmap_io
);
405 TAILQ_FOREACH(ev
, &ctx
->events
, ev_io_next
) {
406 if (ev
->ev_events
& events
)
407 event_active_nolock(ev
, ev
->ev_events
& events
, 1);
411 /* code specific to signals */
414 evmap_signal_init(struct evmap_signal
*entry
)
416 TAILQ_INIT(&entry
->events
);
421 evmap_signal_add(struct event_base
*base
, int sig
, struct event
*ev
)
423 const struct eventop
*evsel
= base
->evsigsel
;
424 struct event_signal_map
*map
= &base
->sigmap
;
425 struct evmap_signal
*ctx
= NULL
;
427 if (sig
>= map
->nentries
) {
428 if (evmap_make_space(
429 map
, sig
, sizeof(struct evmap_signal
*)) == -1)
432 GET_SIGNAL_SLOT_AND_CTOR(ctx
, map
, sig
, evmap_signal
, evmap_signal_init
,
433 base
->evsigsel
->fdinfo_len
);
435 if (TAILQ_EMPTY(&ctx
->events
)) {
436 if (evsel
->add(base
, ev
->ev_fd
, 0, EV_SIGNAL
, NULL
)
441 TAILQ_INSERT_TAIL(&ctx
->events
, ev
, ev_signal_next
);
447 evmap_signal_del(struct event_base
*base
, int sig
, struct event
*ev
)
449 const struct eventop
*evsel
= base
->evsigsel
;
450 struct event_signal_map
*map
= &base
->sigmap
;
451 struct evmap_signal
*ctx
;
453 if (sig
>= map
->nentries
)
456 GET_SIGNAL_SLOT(ctx
, map
, sig
, evmap_signal
);
458 if (TAILQ_FIRST(&ctx
->events
) == TAILQ_LAST(&ctx
->events
, event_list
)) {
459 if (evsel
->del(base
, ev
->ev_fd
, 0, EV_SIGNAL
, NULL
) == -1)
463 TAILQ_REMOVE(&ctx
->events
, ev
, ev_signal_next
);
469 evmap_signal_active(struct event_base
*base
, evutil_socket_t sig
, int ncalls
)
471 struct event_signal_map
*map
= &base
->sigmap
;
472 struct evmap_signal
*ctx
;
475 EVUTIL_ASSERT(sig
< map
->nentries
);
476 GET_SIGNAL_SLOT(ctx
, map
, sig
, evmap_signal
);
478 TAILQ_FOREACH(ev
, &ctx
->events
, ev_signal_next
)
479 event_active_nolock(ev
, EV_SIGNAL
, ncalls
);
483 evmap_io_get_fdinfo(struct event_io_map
*map
, evutil_socket_t fd
)
485 struct evmap_io
*ctx
;
486 GET_IO_SLOT(ctx
, map
, fd
, evmap_io
);
488 return ((char*)ctx
) + sizeof(struct evmap_io
);
493 /** Per-fd structure for use with changelists. It keeps track, for each fd or
494 * signal using the changelist, of where its entry in the changelist is.
496 struct event_changelist_fdinfo
{
497 int idxplus1
; /* this is the index +1, so that memset(0) will make it
498 * a no-such-element */
502 event_changelist_init(struct event_changelist
*changelist
)
504 changelist
->changes
= NULL
;
505 changelist
->changes_size
= 0;
506 changelist
->n_changes
= 0;
509 /** Helper: return the changelist_fdinfo corresponding to a given change. */
510 static inline struct event_changelist_fdinfo
*
511 event_change_get_fdinfo(struct event_base
*base
,
512 const struct event_change
*change
)
515 if (change
->read_change
& EV_CHANGE_SIGNAL
) {
516 struct evmap_signal
*ctx
;
517 GET_SIGNAL_SLOT(ctx
, &base
->sigmap
, change
->fd
, evmap_signal
);
518 ptr
= ((char*)ctx
) + sizeof(struct evmap_signal
);
520 struct evmap_io
*ctx
;
521 GET_IO_SLOT(ctx
, &base
->io
, change
->fd
, evmap_io
);
522 ptr
= ((char*)ctx
) + sizeof(struct evmap_io
);
527 #ifdef DEBUG_CHANGELIST
528 /** Make sure that the changelist is consistent with the evmap structures. */
530 event_changelist_check(struct event_base
*base
)
533 struct event_changelist
*changelist
= &base
->changelist
;
535 EVUTIL_ASSERT(changelist
->changes_size
>= changelist
->n_changes
);
536 for (i
= 0; i
< changelist
->n_changes
; ++i
) {
537 struct event_change
*c
= &changelist
->changes
[i
];
538 struct event_changelist_fdinfo
*f
;
539 EVUTIL_ASSERT(c
->fd
>= 0);
540 f
= event_change_get_fdinfo(base
, c
);
542 EVUTIL_ASSERT(f
->idxplus1
== i
+ 1);
545 for (i
= 0; i
< base
->io
.nentries
; ++i
) {
546 struct evmap_io
*io
= base
->io
.entries
[i
];
547 struct event_changelist_fdinfo
*f
;
551 ( ((char*)io
) + sizeof(struct evmap_io
) );
553 struct event_change
*c
= &changelist
->changes
[f
->idxplus1
- 1];
554 EVUTIL_ASSERT(c
->fd
== i
);
559 #define event_changelist_check(base) ((void)0)
563 event_changelist_remove_all(struct event_changelist
*changelist
,
564 struct event_base
*base
)
568 event_changelist_check(base
);
570 for (i
= 0; i
< changelist
->n_changes
; ++i
) {
571 struct event_change
*ch
= &changelist
->changes
[i
];
572 struct event_changelist_fdinfo
*fdinfo
=
573 event_change_get_fdinfo(base
, ch
);
574 EVUTIL_ASSERT(fdinfo
->idxplus1
== i
+ 1);
575 fdinfo
->idxplus1
= 0;
578 changelist
->n_changes
= 0;
580 event_changelist_check(base
);
584 event_changelist_freemem(struct event_changelist
*changelist
)
586 if (changelist
->changes
)
587 mm_free(changelist
->changes
);
588 event_changelist_init(changelist
); /* zero it all out. */
591 /** Increase the size of 'changelist' to hold more changes. */
593 event_changelist_grow(struct event_changelist
*changelist
)
596 struct event_change
*new_changes
;
597 if (changelist
->changes_size
< 64)
600 new_size
= changelist
->changes_size
* 2;
602 new_changes
= mm_realloc(changelist
->changes
,
603 new_size
* sizeof(struct event_change
));
605 if (EVUTIL_UNLIKELY(new_changes
== NULL
))
608 changelist
->changes
= new_changes
;
609 changelist
->changes_size
= new_size
;
614 /** Return a pointer to the changelist entry for the file descriptor or signal
615 * 'fd', whose fdinfo is 'fdinfo'. If none exists, construct it, setting its
616 * old_events field to old_events.
618 static struct event_change
*
619 event_changelist_get_or_construct(struct event_changelist
*changelist
,
622 struct event_changelist_fdinfo
*fdinfo
)
624 struct event_change
*change
;
626 if (fdinfo
->idxplus1
== 0) {
628 EVUTIL_ASSERT(changelist
->n_changes
<= changelist
->changes_size
);
630 if (changelist
->n_changes
== changelist
->changes_size
) {
631 if (event_changelist_grow(changelist
) < 0)
635 idx
= changelist
->n_changes
++;
636 change
= &changelist
->changes
[idx
];
637 fdinfo
->idxplus1
= idx
+ 1;
639 memset(change
, 0, sizeof(struct event_change
));
641 change
->old_events
= old_events
;
643 change
= &changelist
->changes
[fdinfo
->idxplus1
- 1];
644 EVUTIL_ASSERT(change
->fd
== fd
);
650 event_changelist_add(struct event_base
*base
, evutil_socket_t fd
, short old
, short events
,
653 struct event_changelist
*changelist
= &base
->changelist
;
654 struct event_changelist_fdinfo
*fdinfo
= p
;
655 struct event_change
*change
;
657 event_changelist_check(base
);
659 change
= event_changelist_get_or_construct(changelist
, fd
, old
, fdinfo
);
663 /* An add replaces any previous delete, but doesn't result in a no-op,
664 * since the delete might fail (because the fd had been closed since
665 * the last add, for instance. */
667 if (events
& (EV_READ
|EV_SIGNAL
)) {
668 change
->read_change
= EV_CHANGE_ADD
|
669 (events
& (EV_ET
|EV_PERSIST
|EV_SIGNAL
));
671 if (events
& EV_WRITE
) {
672 change
->write_change
= EV_CHANGE_ADD
|
673 (events
& (EV_ET
|EV_PERSIST
|EV_SIGNAL
));
676 event_changelist_check(base
);
681 event_changelist_del(struct event_base
*base
, evutil_socket_t fd
, short old
, short events
,
684 struct event_changelist
*changelist
= &base
->changelist
;
685 struct event_changelist_fdinfo
*fdinfo
= p
;
686 struct event_change
*change
;
688 event_changelist_check(base
);
689 change
= event_changelist_get_or_construct(changelist
, fd
, old
, fdinfo
);
690 event_changelist_check(base
);
694 /* A delete removes any previous add, rather than replacing it:
695 on those platforms where "add, delete, dispatch" is not the same
696 as "no-op, dispatch", we want the no-op behavior.
698 As well as checking the current operation we should also check
699 the original set of events to make sure were not ignoring
700 the case where the add operation is present on an event that
703 If we have a no-op item, we could remove it it from the list
704 entirely, but really there's not much point: skipping the no-op
705 change when we do the dispatch later is far cheaper than rejuggling
708 As this stands, it also lets through deletions of events that are
712 if (events
& (EV_READ
|EV_SIGNAL
)) {
713 if (!(change
->old_events
& (EV_READ
| EV_SIGNAL
)) &&
714 (change
->read_change
& EV_CHANGE_ADD
))
715 change
->read_change
= 0;
717 change
->read_change
= EV_CHANGE_DEL
;
719 if (events
& EV_WRITE
) {
720 if (!(change
->old_events
& EV_WRITE
) &&
721 (change
->write_change
& EV_CHANGE_ADD
))
722 change
->write_change
= 0;
724 change
->write_change
= EV_CHANGE_DEL
;
727 event_changelist_check(base
);
732 evmap_check_integrity(struct event_base
*base
)
734 #define EVLIST_X_SIGFOUND 0x1000
735 #define EVLIST_X_IOFOUND 0x2000
739 struct event_io_map
*io
= &base
->io
;
740 struct event_signal_map
*sigmap
= &base
->sigmap
;
742 struct event_map_entry
**mapent
;
744 int nsignals
, ntimers
, nio
;
745 nsignals
= ntimers
= nio
= 0;
747 TAILQ_FOREACH(ev
, &base
->eventqueue
, ev_next
) {
748 EVUTIL_ASSERT(ev
->ev_flags
& EVLIST_INSERTED
);
749 EVUTIL_ASSERT(ev
->ev_flags
& EVLIST_INIT
);
750 ev
->ev_flags
&= ~(EVLIST_X_SIGFOUND
|EVLIST_X_IOFOUND
);
754 HT_FOREACH(mapent
, event_io_map
, io
) {
755 struct evmap_io
*ctx
= &(*mapent
)->ent
.evmap_io
;
758 for (i
= 0; i
< io
->nentries
; ++i
) {
759 struct evmap_io
*ctx
= io
->entries
[i
];
765 TAILQ_FOREACH(ev
, &ctx
->events
, ev_io_next
) {
766 EVUTIL_ASSERT(!(ev
->ev_flags
& EVLIST_X_IOFOUND
));
767 EVUTIL_ASSERT(ev
->ev_fd
== i
);
768 ev
->ev_flags
|= EVLIST_X_IOFOUND
;
773 for (i
= 0; i
< sigmap
->nentries
; ++i
) {
774 struct evmap_signal
*ctx
= sigmap
->entries
[i
];
778 TAILQ_FOREACH(ev
, &ctx
->events
, ev_signal_next
) {
779 EVUTIL_ASSERT(!(ev
->ev_flags
& EVLIST_X_SIGFOUND
));
780 EVUTIL_ASSERT(ev
->ev_fd
== i
);
781 ev
->ev_flags
|= EVLIST_X_SIGFOUND
;
786 TAILQ_FOREACH(ev
, &base
->eventqueue
, ev_next
) {
787 if (ev
->ev_events
& (EV_READ
|EV_WRITE
)) {
788 EVUTIL_ASSERT(ev
->ev_flags
& EVLIST_X_IOFOUND
);
791 if (ev
->ev_events
& EV_SIGNAL
) {
792 EVUTIL_ASSERT(ev
->ev_flags
& EVLIST_X_SIGFOUND
);
797 EVUTIL_ASSERT(nio
== 0);
798 EVUTIL_ASSERT(nsignals
== 0);
799 /* There is no "EVUTIL_ASSERT(ntimers == 0)": eventqueue is only for
800 * pending signals and io events.