/*	$NetBSD: evmap.c,v 1.2 2013/04/11 16:56:41 christos Exp $	*/
/*
 * Copyright (c) 2007-2012 Niels Provos and Nick Mathewson
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include "event2/event-config.h"
#include <sys/cdefs.h>
__RCSID("$NetBSD: evmap.c,v 1.2 2013/04/11 16:56:41 christos Exp $");

#ifdef WIN32
#include <winsock2.h>
#define WIN32_LEAN_AND_MEAN
#include <windows.h>
#undef WIN32_LEAN_AND_MEAN
#endif
#include <sys/types.h>
#if !defined(WIN32) && defined(_EVENT_HAVE_SYS_TIME_H)
#include <sys/time.h>
#endif
#include <sys/queue.h>
#include <stdio.h>
#include <stdlib.h>
#ifndef WIN32
#include <unistd.h>
#endif
#include <errno.h>
#include <signal.h>
#include <string.h>
#include <time.h>

#include "event-internal.h"
#include "evmap-internal.h"
#include "mm-internal.h"
#include "changelist-internal.h"
57 /** An entry for an evmap_io list: notes all the events that want to read or
58 write on a given fd, and the number of each.
60 struct evmap_io {
61 struct event_list events;
62 ev_uint16_t nread;
63 ev_uint16_t nwrite;
66 /* An entry for an evmap_signal list: notes all the events that want to know
67 when a signal triggers. */
68 struct evmap_signal {
69 struct event_list events;
72 /* On some platforms, fds start at 0 and increment by 1 as they are
73 allocated, and old numbers get used. For these platforms, we
74 implement io maps just like signal maps: as an array of pointers to
75 struct evmap_io. But on other platforms (windows), sockets are not
76 0-indexed, not necessarily consecutive, and not necessarily reused.
77 There, we use a hashtable to implement evmap_io.
79 #ifdef EVMAP_USE_HT
80 struct event_map_entry {
81 HT_ENTRY(event_map_entry) map_node;
82 evutil_socket_t fd;
83 union { /* This is a union in case we need to make more things that can
84 be in the hashtable. */
85 struct evmap_io evmap_io;
86 } ent;
89 /* Helper used by the event_io_map hashtable code; tries to return a good hash
90 * of the fd in e->fd. */
91 static inline unsigned
92 hashsocket(struct event_map_entry *e)
94 /* On win32, in practice, the low 2-3 bits of a SOCKET seem not to
95 * matter. Our hashtable implementation really likes low-order bits,
96 * though, so let's do the rotate-and-add trick. */
97 unsigned h = (unsigned) e->fd;
98 h += (h >> 2) | (h << 30);
99 return h;
102 /* Helper used by the event_io_map hashtable code; returns true iff e1 and e2
103 * have the same e->fd. */
104 static inline int
105 eqsocket(struct event_map_entry *e1, struct event_map_entry *e2)
107 return e1->fd == e2->fd;
110 HT_PROTOTYPE(event_io_map, event_map_entry, map_node, hashsocket, eqsocket)
111 HT_GENERATE(event_io_map, event_map_entry, map_node, hashsocket, eqsocket,
112 0.5, mm_malloc, mm_realloc, mm_free)
/* Set (x) to the evmap_io entry for fd 'slot' in 'map', or NULL if absent. */
#define GET_IO_SLOT(x, map, slot, type)					\
	do {								\
		struct event_map_entry _key, *_ent;			\
		_key.fd = slot;						\
		_ent = HT_FIND(event_io_map, map, &_key);		\
		(x) = _ent ? &_ent->ent.type : NULL;			\
	} while (/*CONSTCOND*/0);
/* As GET_IO_SLOT, but insert (and run 'ctor' on) a new entry if 'slot' is not
 * yet present; makes the enclosing function return -1 on allocation failure.
 * 'fdinfo_len' extra bytes are allocated after the entry for backend use. */
#define GET_IO_SLOT_AND_CTOR(x, map, slot, type, ctor, fdinfo_len)	\
	do {								\
		struct event_map_entry _key, *_ent;			\
		_key.fd = slot;						\
		_HT_FIND_OR_INSERT(event_io_map, map_node, hashsocket, map, \
		    event_map_entry, &_key, ptr,			\
		    {							\
			_ent = *ptr;					\
		    },							\
		    {							\
			_ent = mm_calloc(1,sizeof(struct event_map_entry)+fdinfo_len); \
			if (EVUTIL_UNLIKELY(_ent == NULL))		\
				return (-1);				\
			_ent->fd = slot;				\
			(ctor)(&_ent->ent.type);			\
			_HT_FOI_INSERT(map_node, map, &_key, _ent, ptr) \
		    });							\
		(x) = &_ent->ent.type;					\
	} while (/*CONSTCOND*/0)
142 void evmap_io_initmap(struct event_io_map *ctx)
144 HT_INIT(event_io_map, ctx);
147 void evmap_io_clear(struct event_io_map *ctx)
149 struct event_map_entry **ent, **next, *this;
150 for (ent = HT_START(event_io_map, ctx); ent; ent = next) {
151 this = *ent;
152 next = HT_NEXT_RMV(event_io_map, ctx, ent);
153 mm_free(this);
155 HT_CLEAR(event_io_map, ctx); /* remove all storage held by the ctx. */
157 #endif
/* Set the variable 'x' to the field in event_map 'map' with fields of type
   'struct type *' corresponding to the fd or signal 'slot'.  Set 'x' to NULL
   if there are no entries for 'slot'.  Does no bounds-checking. */
#define GET_SIGNAL_SLOT(x, map, slot, type)			\
	(x) = (struct type *)((map)->entries[slot])
/* As GET_SLOT, but construct the entry for 'slot' if it is not present,
   by allocating enough memory for a 'struct type', and initializing the new
   value by calling the function 'ctor' on it.  Makes the function
   return -1 on allocation failure.
 */
#define GET_SIGNAL_SLOT_AND_CTOR(x, map, slot, type, ctor, fdinfo_len)	\
	do {								\
		if ((map)->entries[slot] == NULL) {			\
			(map)->entries[slot] =				\
			    mm_calloc(1,sizeof(struct type)+fdinfo_len); \
			if (EVUTIL_UNLIKELY((map)->entries[slot] == NULL)) \
				return (-1);				\
			(ctor)((struct type *)(map)->entries[slot]);	\
		}							\
		(x) = (struct type *)((map)->entries[slot]);		\
	} while (/*CONSTCOND*/0)
/* If we aren't using hashtables, then define the IO_SLOT macros and functions
   as thin aliases over the SIGNAL_SLOT versions. */
#ifndef EVMAP_USE_HT
#define GET_IO_SLOT(x,map,slot,type) GET_SIGNAL_SLOT(x,map,slot,type)
#define GET_IO_SLOT_AND_CTOR(x,map,slot,type,ctor,fdinfo_len)	\
	GET_SIGNAL_SLOT_AND_CTOR(x,map,slot,type,ctor,fdinfo_len)
#define FDINFO_OFFSET sizeof(struct evmap_io)
/* Initialize an fd->events map (array implementation). */
void
evmap_io_initmap(struct event_io_map* ctx)
{
	evmap_signal_initmap(ctx);
}
/* Free every entry in an fd->events map (array implementation). */
void
evmap_io_clear(struct event_io_map* ctx)
{
	evmap_signal_clear(ctx);
}
#endif
201 /** Expand 'map' with new entries of width 'msize' until it is big enough
202 to store a value in 'slot'.
204 static int
205 evmap_make_space(struct event_signal_map *map, int slot, int msize)
207 if (map->nentries <= slot) {
208 int nentries = map->nentries ? map->nentries : 32;
209 void **tmp;
211 while (nentries <= slot)
212 nentries <<= 1;
214 tmp = (void **)mm_realloc(map->entries, nentries * msize);
215 if (tmp == NULL)
216 return (-1);
218 memset(&tmp[map->nentries], 0,
219 (nentries - map->nentries) * msize);
221 map->nentries = nentries;
222 map->entries = tmp;
225 return (0);
228 void
229 evmap_signal_initmap(struct event_signal_map *ctx)
231 ctx->nentries = 0;
232 ctx->entries = NULL;
235 void
236 evmap_signal_clear(struct event_signal_map *ctx)
238 if (ctx->entries != NULL) {
239 int i;
240 for (i = 0; i < ctx->nentries; ++i) {
241 if (ctx->entries[i] != NULL)
242 mm_free(ctx->entries[i]);
244 mm_free(ctx->entries);
245 ctx->entries = NULL;
247 ctx->nentries = 0;
251 /* code specific to file descriptors */
253 /** Constructor for struct evmap_io */
254 static void
255 evmap_io_init(struct evmap_io *entry)
257 TAILQ_INIT(&entry->events);
258 entry->nread = 0;
259 entry->nwrite = 0;
263 /* return -1 on error, 0 on success if nothing changed in the event backend,
264 * and 1 on success if something did. */
266 evmap_io_add(struct event_base *base, evutil_socket_t fd, struct event *ev)
268 const struct eventop *evsel = base->evsel;
269 struct event_io_map *io = &base->io;
270 struct evmap_io *ctx = NULL;
271 int nread, nwrite, retval = 0;
272 short res = 0, old = 0;
273 struct event *old_ev;
275 EVUTIL_ASSERT(fd == ev->ev_fd);
277 if (fd < 0)
278 return 0;
280 #ifndef EVMAP_USE_HT
281 if (fd >= io->nentries) {
282 if (evmap_make_space(io, fd, sizeof(struct evmap_io *)) == -1)
283 return (-1);
285 #endif
286 GET_IO_SLOT_AND_CTOR(ctx, io, fd, evmap_io, evmap_io_init,
287 evsel->fdinfo_len);
289 nread = ctx->nread;
290 nwrite = ctx->nwrite;
292 if (nread)
293 old |= EV_READ;
294 if (nwrite)
295 old |= EV_WRITE;
297 if (ev->ev_events & EV_READ) {
298 if (++nread == 1)
299 res |= EV_READ;
301 if (ev->ev_events & EV_WRITE) {
302 if (++nwrite == 1)
303 res |= EV_WRITE;
305 if (EVUTIL_UNLIKELY(nread > 0xffff || nwrite > 0xffff)) {
306 event_warnx("Too many events reading or writing on fd %d",
307 (int)fd);
308 return -1;
310 if (EVENT_DEBUG_MODE_IS_ON() &&
311 (old_ev = TAILQ_FIRST(&ctx->events)) &&
312 (old_ev->ev_events&EV_ET) != (ev->ev_events&EV_ET)) {
313 event_warnx("Tried to mix edge-triggered and non-edge-triggered"
314 " events on fd %d", (int)fd);
315 return -1;
318 if (res) {
319 void *extra = ((char*)ctx) + sizeof(struct evmap_io);
320 /* XXX(niels): we cannot mix edge-triggered and
321 * level-triggered, we should probably assert on
322 * this. */
323 if (evsel->add(base, ev->ev_fd,
324 old, (ev->ev_events & EV_ET) | res, extra) == -1)
325 return (-1);
326 retval = 1;
329 ctx->nread = (ev_uint16_t) nread;
330 ctx->nwrite = (ev_uint16_t) nwrite;
331 TAILQ_INSERT_TAIL(&ctx->events, ev, ev_io_next);
333 return (retval);
336 /* return -1 on error, 0 on success if nothing changed in the event backend,
337 * and 1 on success if something did. */
339 evmap_io_del(struct event_base *base, evutil_socket_t fd, struct event *ev)
341 const struct eventop *evsel = base->evsel;
342 struct event_io_map *io = &base->io;
343 struct evmap_io *ctx;
344 int nread, nwrite, retval = 0;
345 short res = 0, old = 0;
347 if (fd < 0)
348 return 0;
350 EVUTIL_ASSERT(fd == ev->ev_fd);
352 #ifndef EVMAP_USE_HT
353 if (fd >= io->nentries)
354 return (-1);
355 #endif
357 GET_IO_SLOT(ctx, io, fd, evmap_io);
359 nread = ctx->nread;
360 nwrite = ctx->nwrite;
362 if (nread)
363 old |= EV_READ;
364 if (nwrite)
365 old |= EV_WRITE;
367 if (ev->ev_events & EV_READ) {
368 if (--nread == 0)
369 res |= EV_READ;
370 EVUTIL_ASSERT(nread >= 0);
372 if (ev->ev_events & EV_WRITE) {
373 if (--nwrite == 0)
374 res |= EV_WRITE;
375 EVUTIL_ASSERT(nwrite >= 0);
378 if (res) {
379 void *extra = ((char*)ctx) + sizeof(struct evmap_io);
380 if (evsel->del(base, ev->ev_fd, old, res, extra) == -1)
381 return (-1);
382 retval = 1;
385 ctx->nread = nread;
386 ctx->nwrite = nwrite;
387 TAILQ_REMOVE(&ctx->events, ev, ev_io_next);
389 return (retval);
392 void
393 evmap_io_active(struct event_base *base, evutil_socket_t fd, short events)
395 struct event_io_map *io = &base->io;
396 struct evmap_io *ctx;
397 struct event *ev;
399 #ifndef EVMAP_USE_HT
400 EVUTIL_ASSERT(fd < io->nentries);
401 #endif
402 GET_IO_SLOT(ctx, io, fd, evmap_io);
404 EVUTIL_ASSERT(ctx);
405 TAILQ_FOREACH(ev, &ctx->events, ev_io_next) {
406 if (ev->ev_events & events)
407 event_active_nolock(ev, ev->ev_events & events, 1);
411 /* code specific to signals */
413 static void
414 evmap_signal_init(struct evmap_signal *entry)
416 TAILQ_INIT(&entry->events);
421 evmap_signal_add(struct event_base *base, int sig, struct event *ev)
423 const struct eventop *evsel = base->evsigsel;
424 struct event_signal_map *map = &base->sigmap;
425 struct evmap_signal *ctx = NULL;
427 if (sig >= map->nentries) {
428 if (evmap_make_space(
429 map, sig, sizeof(struct evmap_signal *)) == -1)
430 return (-1);
432 GET_SIGNAL_SLOT_AND_CTOR(ctx, map, sig, evmap_signal, evmap_signal_init,
433 base->evsigsel->fdinfo_len);
435 if (TAILQ_EMPTY(&ctx->events)) {
436 if (evsel->add(base, ev->ev_fd, 0, EV_SIGNAL, NULL)
437 == -1)
438 return (-1);
441 TAILQ_INSERT_TAIL(&ctx->events, ev, ev_signal_next);
443 return (1);
447 evmap_signal_del(struct event_base *base, int sig, struct event *ev)
449 const struct eventop *evsel = base->evsigsel;
450 struct event_signal_map *map = &base->sigmap;
451 struct evmap_signal *ctx;
453 if (sig >= map->nentries)
454 return (-1);
456 GET_SIGNAL_SLOT(ctx, map, sig, evmap_signal);
458 if (TAILQ_FIRST(&ctx->events) == TAILQ_LAST(&ctx->events, event_list)) {
459 if (evsel->del(base, ev->ev_fd, 0, EV_SIGNAL, NULL) == -1)
460 return (-1);
463 TAILQ_REMOVE(&ctx->events, ev, ev_signal_next);
465 return (1);
468 void
469 evmap_signal_active(struct event_base *base, evutil_socket_t sig, int ncalls)
471 struct event_signal_map *map = &base->sigmap;
472 struct evmap_signal *ctx;
473 struct event *ev;
475 EVUTIL_ASSERT(sig < map->nentries);
476 GET_SIGNAL_SLOT(ctx, map, sig, evmap_signal);
478 TAILQ_FOREACH(ev, &ctx->events, ev_signal_next)
479 event_active_nolock(ev, EV_SIGNAL, ncalls);
482 void *
483 evmap_io_get_fdinfo(struct event_io_map *map, evutil_socket_t fd)
485 struct evmap_io *ctx;
486 GET_IO_SLOT(ctx, map, fd, evmap_io);
487 if (ctx)
488 return ((char*)ctx) + sizeof(struct evmap_io);
489 else
490 return NULL;
/** Per-fd structure for use with changelists.  It keeps track, for each fd or
 * signal using the changelist, of where its entry in the changelist is.
 */
struct event_changelist_fdinfo {
	int idxplus1; /* this is the index +1, so that memset(0) will make it
		       * a no-such-element */
};
501 void
502 event_changelist_init(struct event_changelist *changelist)
504 changelist->changes = NULL;
505 changelist->changes_size = 0;
506 changelist->n_changes = 0;
509 /** Helper: return the changelist_fdinfo corresponding to a given change. */
510 static inline struct event_changelist_fdinfo *
511 event_change_get_fdinfo(struct event_base *base,
512 const struct event_change *change)
514 char *ptr;
515 if (change->read_change & EV_CHANGE_SIGNAL) {
516 struct evmap_signal *ctx;
517 GET_SIGNAL_SLOT(ctx, &base->sigmap, change->fd, evmap_signal);
518 ptr = ((char*)ctx) + sizeof(struct evmap_signal);
519 } else {
520 struct evmap_io *ctx;
521 GET_IO_SLOT(ctx, &base->io, change->fd, evmap_io);
522 ptr = ((char*)ctx) + sizeof(struct evmap_io);
524 return (void*)ptr;
#ifdef DEBUG_CHANGELIST
/** Make sure that the changelist is consistent with the evmap structures. */
static void
event_changelist_check(struct event_base *base)
{
	int i;
	struct event_changelist *changelist = &base->changelist;

	EVUTIL_ASSERT(changelist->changes_size >= changelist->n_changes);
	/* Every change must point back at itself via its fdinfo. */
	for (i = 0; i < changelist->n_changes; ++i) {
		struct event_change *c = &changelist->changes[i];
		struct event_changelist_fdinfo *f;
		EVUTIL_ASSERT(c->fd >= 0);
		f = event_change_get_fdinfo(base, c);
		EVUTIL_ASSERT(f);
		EVUTIL_ASSERT(f->idxplus1 == i + 1);
	}

	/* Every fdinfo with an index must point at a change for its fd. */
	for (i = 0; i < base->io.nentries; ++i) {
		struct evmap_io *io = base->io.entries[i];
		struct event_changelist_fdinfo *f;
		if (!io)
			continue;
		f = (void*)
		    ( ((char*)io) + sizeof(struct evmap_io) );
		if (f->idxplus1) {
			struct event_change *c = &changelist->changes[f->idxplus1 - 1];
			EVUTIL_ASSERT(c->fd == i);
		}
	}
}
#else
#define event_changelist_check(base)  ((void)0)
#endif
562 void
563 event_changelist_remove_all(struct event_changelist *changelist,
564 struct event_base *base)
566 int i;
568 event_changelist_check(base);
570 for (i = 0; i < changelist->n_changes; ++i) {
571 struct event_change *ch = &changelist->changes[i];
572 struct event_changelist_fdinfo *fdinfo =
573 event_change_get_fdinfo(base, ch);
574 EVUTIL_ASSERT(fdinfo->idxplus1 == i + 1);
575 fdinfo->idxplus1 = 0;
578 changelist->n_changes = 0;
580 event_changelist_check(base);
583 void
584 event_changelist_freemem(struct event_changelist *changelist)
586 if (changelist->changes)
587 mm_free(changelist->changes);
588 event_changelist_init(changelist); /* zero it all out. */
591 /** Increase the size of 'changelist' to hold more changes. */
592 static int
593 event_changelist_grow(struct event_changelist *changelist)
595 int new_size;
596 struct event_change *new_changes;
597 if (changelist->changes_size < 64)
598 new_size = 64;
599 else
600 new_size = changelist->changes_size * 2;
602 new_changes = mm_realloc(changelist->changes,
603 new_size * sizeof(struct event_change));
605 if (EVUTIL_UNLIKELY(new_changes == NULL))
606 return (-1);
608 changelist->changes = new_changes;
609 changelist->changes_size = new_size;
611 return (0);
614 /** Return a pointer to the changelist entry for the file descriptor or signal
615 * 'fd', whose fdinfo is 'fdinfo'. If none exists, construct it, setting its
616 * old_events field to old_events.
618 static struct event_change *
619 event_changelist_get_or_construct(struct event_changelist *changelist,
620 evutil_socket_t fd,
621 short old_events,
622 struct event_changelist_fdinfo *fdinfo)
624 struct event_change *change;
626 if (fdinfo->idxplus1 == 0) {
627 int idx;
628 EVUTIL_ASSERT(changelist->n_changes <= changelist->changes_size);
630 if (changelist->n_changes == changelist->changes_size) {
631 if (event_changelist_grow(changelist) < 0)
632 return NULL;
635 idx = changelist->n_changes++;
636 change = &changelist->changes[idx];
637 fdinfo->idxplus1 = idx + 1;
639 memset(change, 0, sizeof(struct event_change));
640 change->fd = fd;
641 change->old_events = old_events;
642 } else {
643 change = &changelist->changes[fdinfo->idxplus1 - 1];
644 EVUTIL_ASSERT(change->fd == fd);
646 return change;
650 event_changelist_add(struct event_base *base, evutil_socket_t fd, short old, short events,
651 void *p)
653 struct event_changelist *changelist = &base->changelist;
654 struct event_changelist_fdinfo *fdinfo = p;
655 struct event_change *change;
657 event_changelist_check(base);
659 change = event_changelist_get_or_construct(changelist, fd, old, fdinfo);
660 if (!change)
661 return -1;
663 /* An add replaces any previous delete, but doesn't result in a no-op,
664 * since the delete might fail (because the fd had been closed since
665 * the last add, for instance. */
667 if (events & (EV_READ|EV_SIGNAL)) {
668 change->read_change = EV_CHANGE_ADD |
669 (events & (EV_ET|EV_PERSIST|EV_SIGNAL));
671 if (events & EV_WRITE) {
672 change->write_change = EV_CHANGE_ADD |
673 (events & (EV_ET|EV_PERSIST|EV_SIGNAL));
676 event_changelist_check(base);
677 return (0);
681 event_changelist_del(struct event_base *base, evutil_socket_t fd, short old, short events,
682 void *p)
684 struct event_changelist *changelist = &base->changelist;
685 struct event_changelist_fdinfo *fdinfo = p;
686 struct event_change *change;
688 event_changelist_check(base);
689 change = event_changelist_get_or_construct(changelist, fd, old, fdinfo);
690 event_changelist_check(base);
691 if (!change)
692 return -1;
694 /* A delete removes any previous add, rather than replacing it:
695 on those platforms where "add, delete, dispatch" is not the same
696 as "no-op, dispatch", we want the no-op behavior.
698 As well as checking the current operation we should also check
699 the original set of events to make sure were not ignoring
700 the case where the add operation is present on an event that
701 was already set.
703 If we have a no-op item, we could remove it it from the list
704 entirely, but really there's not much point: skipping the no-op
705 change when we do the dispatch later is far cheaper than rejuggling
706 the array now.
708 As this stands, it also lets through deletions of events that are
709 not currently set.
712 if (events & (EV_READ|EV_SIGNAL)) {
713 if (!(change->old_events & (EV_READ | EV_SIGNAL)) &&
714 (change->read_change & EV_CHANGE_ADD))
715 change->read_change = 0;
716 else
717 change->read_change = EV_CHANGE_DEL;
719 if (events & EV_WRITE) {
720 if (!(change->old_events & EV_WRITE) &&
721 (change->write_change & EV_CHANGE_ADD))
722 change->write_change = 0;
723 else
724 change->write_change = EV_CHANGE_DEL;
727 event_changelist_check(base);
728 return (0);
731 void
732 evmap_check_integrity(struct event_base *base)
734 #define EVLIST_X_SIGFOUND 0x1000
735 #define EVLIST_X_IOFOUND 0x2000
737 evutil_socket_t i;
738 struct event *ev;
739 struct event_io_map *io = &base->io;
740 struct event_signal_map *sigmap = &base->sigmap;
741 #ifdef EVMAP_USE_HT
742 struct event_map_entry **mapent;
743 #endif
744 int nsignals, ntimers, nio;
745 nsignals = ntimers = nio = 0;
747 TAILQ_FOREACH(ev, &base->eventqueue, ev_next) {
748 EVUTIL_ASSERT(ev->ev_flags & EVLIST_INSERTED);
749 EVUTIL_ASSERT(ev->ev_flags & EVLIST_INIT);
750 ev->ev_flags &= ~(EVLIST_X_SIGFOUND|EVLIST_X_IOFOUND);
753 #ifdef EVMAP_USE_HT
754 HT_FOREACH(mapent, event_io_map, io) {
755 struct evmap_io *ctx = &(*mapent)->ent.evmap_io;
756 i = (*mapent)->fd;
757 #else
758 for (i = 0; i < io->nentries; ++i) {
759 struct evmap_io *ctx = io->entries[i];
761 if (!ctx)
762 continue;
763 #endif
765 TAILQ_FOREACH(ev, &ctx->events, ev_io_next) {
766 EVUTIL_ASSERT(!(ev->ev_flags & EVLIST_X_IOFOUND));
767 EVUTIL_ASSERT(ev->ev_fd == i);
768 ev->ev_flags |= EVLIST_X_IOFOUND;
769 nio++;
773 for (i = 0; i < sigmap->nentries; ++i) {
774 struct evmap_signal *ctx = sigmap->entries[i];
775 if (!ctx)
776 continue;
778 TAILQ_FOREACH(ev, &ctx->events, ev_signal_next) {
779 EVUTIL_ASSERT(!(ev->ev_flags & EVLIST_X_SIGFOUND));
780 EVUTIL_ASSERT(ev->ev_fd == i);
781 ev->ev_flags |= EVLIST_X_SIGFOUND;
782 nsignals++;
786 TAILQ_FOREACH(ev, &base->eventqueue, ev_next) {
787 if (ev->ev_events & (EV_READ|EV_WRITE)) {
788 EVUTIL_ASSERT(ev->ev_flags & EVLIST_X_IOFOUND);
789 --nio;
791 if (ev->ev_events & EV_SIGNAL) {
792 EVUTIL_ASSERT(ev->ev_flags & EVLIST_X_SIGFOUND);
793 --nsignals;
797 EVUTIL_ASSERT(nio == 0);
798 EVUTIL_ASSERT(nsignals == 0);
799 /* There is no "EVUTIL_ASSERT(ntimers == 0)": eventqueue is only for
800 * pending signals and io events.