__RCSID("$NetBSD: eloop.c,v 1.11 2015/05/16 23:31:32 roy Exp $");
/*
 * dhcpcd - DHCP client daemon
 * Copyright (c) 2006-2015 Roy Marples <roy@marples.name>
 * All rights reserved
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
#include <sys/queue.h>

#include <assert.h>
#include <errno.h>
#include <limits.h>
#include <signal.h>
#include <stdlib.h>
#include <string.h>
#include <time.h>
#include <unistd.h>

/* config.h should define HAVE_KQUEUE, HAVE_EPOLL, etc */
#include "config.h"
#include "eloop.h"

#define UNUSED(a) (void)((a))

#ifndef __unused
#define __unused __attribute__((__unused__))
#endif

#define MSEC_PER_SEC	1000L
#define NSEC_PER_MSEC	1000000L
#if defined(HAVE_KQUEUE)
#include <sys/event.h>
#include <fcntl.h>
#ifdef __NetBSD__
/* udata is void * except on NetBSD
 * lengths are int except on NetBSD */
#define UPTR(x)	((intptr_t)(x))
#define LENC(x)	(x)
#else
#define UPTR(x)	(x)
#define LENC(x)	((int)(x))
#endif
#define eloop_event_setup_fds(eloop)
#elif defined(HAVE_EPOLL)
#include <sys/epoll.h>
#define eloop_event_setup_fds(eloop)
#else
#include <poll.h>
static void
eloop_event_setup_fds(struct eloop *eloop)
{
	struct eloop_event *e;
	size_t i;

	i = 0;
	TAILQ_FOREACH(e, &eloop->events, next) {
		eloop->fds[i].fd = e->fd;
		eloop->fds[i].events = 0;
		if (e->read_cb)
			eloop->fds[i].events |= POLLIN;
		if (e->write_cb)
			eloop->fds[i].events |= POLLOUT;
		eloop->fds[i].revents = 0;
		e->pollfd = &eloop->fds[i];
		i++;
	}
}
/* Wrapper around pselect, to imitate the NetBSD pollts call. */
#if !defined(__minix)
static int
#else /* defined(__minix) */
int
#endif /* defined(__minix) */
pollts(struct pollfd * fds, nfds_t nfds,
    const struct timespec *ts, const sigset_t *sigmask)
{
	fd_set read_fds;
	nfds_t n;
	int maxfd, r;
#if defined(__minix)
	sigset_t omask;
	struct timeval tv, *tvp;
#endif /* defined(__minix) */
	FD_ZERO(&read_fds);
	maxfd = 0;
	for (n = 0; n < nfds; n++) {
		if (fds[n].events & POLLIN) {
			FD_SET(fds[n].fd, &read_fds);
			if (fds[n].fd > maxfd)
				maxfd = fds[n].fd;
		}
	}
#if !defined(__minix)
	r = pselect(maxfd + 1, &read_fds, NULL, NULL, ts, sigmask);
#else /* defined(__minix) */
	/* XXX FIXME - horrible hack with race condition */
	sigprocmask(SIG_SETMASK, sigmask, &omask);
	if (ts != NULL) {
		tv.tv_sec = ts->tv_sec;
		tv.tv_usec = ts->tv_nsec / 1000;
		tvp = &tv;
	} else
		tvp = NULL;
	r = select(maxfd + 1, &read_fds, NULL, NULL, tvp);
	sigprocmask(SIG_SETMASK, &omask, NULL);
#endif /* defined(__minix) */
	if (r > 0) {
		for (n = 0; n < nfds; n++) {
			fds[n].revents =
			    FD_ISSET(fds[n].fd, &read_fds) ? POLLIN : 0;
		}
	}
	return r;
}
#endif
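
/*
 * Example (illustrative values): with the wrapper above, the poll(2)
 * backend can wait for readability with an atomic signal mask, just
 * like the native NetBSD call:
 *
 *	struct timespec ts = { .tv_sec = 5, .tv_nsec = 0 };
 *
 *	n = pollts(fds, nfds, &ts, &sigmask);
 */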
int
eloop_event_add(struct eloop *eloop, int fd,
    void (*read_cb)(void *), void *read_cb_arg,
    void (*write_cb)(void *), void *write_cb_arg)
{
	struct eloop_event *e;
#if defined(HAVE_KQUEUE)
	struct kevent ke[2];
#elif defined(HAVE_EPOLL)
	struct epoll_event epe;
#else
	struct pollfd *nfds;
#endif

	assert(eloop != NULL);
	assert(read_cb != NULL || write_cb != NULL);
#if defined(HAVE_EPOLL)
	memset(&epe, 0, sizeof(epe));
	epe.data.fd = fd;
	epe.events = EPOLLIN;
	if (write_cb)
		epe.events |= EPOLLOUT;
#endif
	/* We should only have one callback monitoring the fd */
	TAILQ_FOREACH(e, &eloop->events, next) {
		if (e->fd == fd) {
			int error;
#if defined(HAVE_KQUEUE)
			EV_SET(&ke[0], (uintptr_t)fd, EVFILT_READ, EV_ADD,
			    0, 0, UPTR(e));
			if (write_cb)
				EV_SET(&ke[1], (uintptr_t)fd, EVFILT_WRITE,
				    EV_ADD, 0, 0, UPTR(e));
			else if (e->write_cb)
				EV_SET(&ke[1], (uintptr_t)fd, EVFILT_WRITE,
				    EV_DELETE, 0, 0, UPTR(e));
			error = kevent(eloop->poll_fd, ke,
			    e->write_cb || write_cb ? 2 : 1, NULL, 0, NULL);
#elif defined(HAVE_EPOLL)
			error = epoll_ctl(eloop->poll_fd, EPOLL_CTL_MOD,
			    fd, &epe);
#else
			error = 0;
#endif
			if (read_cb) {
				e->read_cb = read_cb;
				e->read_cb_arg = read_cb_arg;
			}
			if (write_cb) {
				e->write_cb = write_cb;
				e->write_cb_arg = write_cb_arg;
			}
			eloop_event_setup_fds(eloop);
			return error;
		}
	}
	/* Allocate a new event if no free ones already allocated */
	if ((e = TAILQ_FIRST(&eloop->free_events))) {
		TAILQ_REMOVE(&eloop->free_events, e, next);
	} else {
		e = malloc(sizeof(*e));
		if (e == NULL)
			goto err;
	}
	/* Ensure we can actually listen to it */
	eloop->events_len++;
#if !defined(HAVE_KQUEUE) && !defined(HAVE_EPOLL)
	if (eloop->events_len > eloop->fds_len) {
		nfds = realloc(eloop->fds,
		    sizeof(*eloop->fds) * (eloop->fds_len + 5));
		if (nfds == NULL)
			goto err;
		eloop->fds_len += 5;
		eloop->fds = nfds;
	}
#endif
	/* Now populate the structure and add it to the list */
	e->fd = fd;
	e->read_cb = read_cb;
	e->read_cb_arg = read_cb_arg;
	e->write_cb = write_cb;
	e->write_cb_arg = write_cb_arg;
#if defined(HAVE_KQUEUE)
	EV_SET(&ke[0], (uintptr_t)fd, EVFILT_READ,
	    EV_ADD, 0, 0, UPTR(e));
	if (write_cb != NULL)
		EV_SET(&ke[1], (uintptr_t)fd, EVFILT_WRITE,
		    EV_ADD, 0, 0, UPTR(e));
	if (kevent(eloop->poll_fd, ke, write_cb ? 2 : 1, NULL, 0, NULL) == -1)
		goto err;
#elif defined(HAVE_EPOLL)
	epe.data.ptr = e;
	if (epoll_ctl(eloop->poll_fd, EPOLL_CTL_ADD, fd, &epe) == -1)
		goto err;
#endif
	/* The order of events should not matter.
	 * However, some PPP servers love to close the link right after
	 * sending their final message. So to ensure dhcpcd processes this
	 * message (which is likely to be that the DHCP addresses are wrong)
	 * we insert new events at the queue head as the link fd will be
	 * the first event added. */
	TAILQ_INSERT_HEAD(&eloop->events, e, next);
	eloop_event_setup_fds(eloop);
	return 0;

err:
	if (e) {
		eloop->events_len--;
		TAILQ_INSERT_TAIL(&eloop->free_events, e, next);
	}
	return -1;
}
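
/*
 * Example usage (handle_read, handle_write and ctx are hypothetical):
 * register a read callback first, then later attach a write callback
 * to the same fd; the second call takes the modify path above.
 *
 *	eloop_event_add(eloop, fd, handle_read, ctx, NULL, NULL);
 *	eloop_event_add(eloop, fd, handle_read, ctx, handle_write, ctx);
 */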
void
eloop_event_delete_write(struct eloop *eloop, int fd, int write_only)
{
	struct eloop_event *e;
#if defined(HAVE_KQUEUE)
	struct kevent ke[2];
#elif defined(HAVE_EPOLL)
	struct epoll_event epe;
#endif

	assert(eloop != NULL);

	TAILQ_FOREACH(e, &eloop->events, next) {
		if (e->fd == fd) {
			if (write_only && e->read_cb != NULL) {
				if (e->write_cb != NULL) {
					e->write_cb = NULL;
					e->write_cb_arg = NULL;
#if defined(HAVE_KQUEUE)
					EV_SET(&ke[0], (uintptr_t)fd,
					    EVFILT_WRITE, EV_DELETE,
					    0, 0, UPTR(NULL));
					kevent(eloop->poll_fd, ke, 1, NULL, 0,
					    NULL);
#elif defined(HAVE_EPOLL)
					memset(&epe, 0, sizeof(epe));
					epe.data.fd = fd;
					epe.data.ptr = e;
					epe.events = EPOLLIN;
					epoll_ctl(eloop->poll_fd,
					    EPOLL_CTL_MOD, fd, &epe);
#endif
				}
			} else {
				TAILQ_REMOVE(&eloop->events, e, next);
#if defined(HAVE_KQUEUE)
				EV_SET(&ke[0], (uintptr_t)fd, EVFILT_READ,
				    EV_DELETE, 0, 0, UPTR(NULL));
				if (e->write_cb)
					EV_SET(&ke[1], (uintptr_t)fd,
					    EVFILT_WRITE, EV_DELETE,
					    0, 0, UPTR(NULL));
				kevent(eloop->poll_fd, ke, e->write_cb ? 2 : 1,
				    NULL, 0, NULL);
#elif defined(HAVE_EPOLL)
				/* NULL event is safe because we
				 * rely on epoll_pwait, which was added
				 * after the delete-without-event bug was
				 * fixed. */
				epoll_ctl(eloop->poll_fd, EPOLL_CTL_DEL,
				    fd, NULL);
#endif
				eloop->events_len--;
				TAILQ_INSERT_TAIL(&eloop->free_events, e, next);
			}
			eloop_event_setup_fds(eloop);
			break;
		}
	}
}
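
/*
 * Example (illustrative): once a pending buffer has drained, drop just
 * the write callback but keep reading; a zero write_only argument
 * removes the whole event.
 *
 *	eloop_event_delete_write(eloop, fd, 1);	(stop write polling)
 *	eloop_event_delete_write(eloop, fd, 0);	(remove the event)
 */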
static int
eloop_q_timeout_add_tv(struct eloop *eloop, int queue,
    const struct timespec *when, void (*callback)(void *), void *arg)
{
	struct timespec now, w;
	struct eloop_timeout *t, *tt = NULL;

	assert(eloop != NULL);
	assert(when != NULL);
	assert(callback != NULL);

	clock_gettime(CLOCK_MONOTONIC, &now);
	timespecadd(&now, when, &w);
	/* Check for time_t overflow. */
	if (timespeccmp(&w, &now, <)) {
		errno = ERANGE;
		return -1;
	}
	/* Remove existing timeout if present */
	TAILQ_FOREACH(t, &eloop->timeouts, next) {
		if (t->callback == callback && t->arg == arg) {
			TAILQ_REMOVE(&eloop->timeouts, t, next);
			break;
		}
	}
	if (t == NULL) {
		/* No existing, so allocate or grab one from the free pool */
		if ((t = TAILQ_FIRST(&eloop->free_timeouts))) {
			TAILQ_REMOVE(&eloop->free_timeouts, t, next);
		} else {
			if ((t = malloc(sizeof(*t))) == NULL)
				return -1;
		}
	}

	t->when = w;
	t->callback = callback;
	t->arg = arg;
	t->queue = queue;
	/* The timeout list should be in chronological order,
	 * soonest first. */
	TAILQ_FOREACH(tt, &eloop->timeouts, next) {
		if (timespeccmp(&t->when, &tt->when, <)) {
			TAILQ_INSERT_BEFORE(tt, t, next);
			return 0;
		}
	}
	TAILQ_INSERT_TAIL(&eloop->timeouts, t, next);
	return 0;
}
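
/*
 * Keeping the list sorted soonest-first means eloop_start below only
 * ever inspects TAILQ_FIRST(&eloop->timeouts) to find the next
 * deadline; insertion is O(n), but the list is typically short.
 */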
int
eloop_q_timeout_add_sec(struct eloop *eloop, int queue, time_t when,
    void (*callback)(void *), void *arg)
{
	struct timespec tv;

	tv.tv_sec = when;
	tv.tv_nsec = 0;
	return eloop_q_timeout_add_tv(eloop, queue, &tv, callback, arg);
}
int
eloop_q_timeout_add_msec(struct eloop *eloop, int queue, long when,
    void (*callback)(void *), void *arg)
{
	struct timespec tv;

	tv.tv_sec = when / MSEC_PER_SEC;
	tv.tv_nsec = (when % MSEC_PER_SEC) * NSEC_PER_MSEC;
	return eloop_q_timeout_add_tv(eloop, queue, &tv, callback, arg);
}
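
/*
 * Worked example: when = 1500 gives tv_sec = 1500 / 1000 = 1 and
 * tv_nsec = (1500 % 1000) * 1000000 = 500000000, i.e. 1.5 seconds.
 */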
#if !defined(HAVE_KQUEUE)
static int
eloop_timeout_add_now(struct eloop *eloop,
    void (*callback)(void *), void *arg)
{

	assert(eloop->timeout0 == NULL);
	eloop->timeout0 = callback;
	eloop->timeout0_arg = arg;
	return 0;
}
#endif
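
/*
 * timeout0 is a "run immediately" slot consumed at the top of each
 * eloop_start iteration; it exists for the signal trampoline below.
 * kqueue builds receive signals through EVFILT_SIGNAL instead, so
 * this helper is not compiled in for them.
 */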
void
eloop_q_timeout_delete(struct eloop *eloop, int queue,
    void (*callback)(void *), void *arg)
{
	struct eloop_timeout *t, *tt;

	assert(eloop != NULL);

	TAILQ_FOREACH_SAFE(t, &eloop->timeouts, next, tt) {
		if ((queue == 0 || t->queue == queue) &&
		    t->arg == arg &&
		    (!callback || t->callback == callback))
		{
			TAILQ_REMOVE(&eloop->timeouts, t, next);
			TAILQ_INSERT_TAIL(&eloop->free_timeouts, t, next);
		}
	}
}
void
eloop_exit(struct eloop *eloop, int code)
{

	assert(eloop != NULL);

	eloop->exitcode = code;
	eloop->exitnow = 1;
}
#if defined(HAVE_KQUEUE) || defined(HAVE_EPOLL)
static int
eloop_open(struct eloop *eloop)
{

#if defined(HAVE_KQUEUE1)
	return (eloop->poll_fd = kqueue1(O_CLOEXEC));
#elif defined(HAVE_KQUEUE)
	int i;

	if ((eloop->poll_fd = kqueue()) == -1)
		return -1;
	if ((i = fcntl(eloop->poll_fd, F_GETFD, 0)) == -1 ||
	    fcntl(eloop->poll_fd, F_SETFD, i | FD_CLOEXEC) == -1)
	{
		close(eloop->poll_fd);
		eloop->poll_fd = -1;
		return -1;
	}

	return eloop->poll_fd;
#elif defined (HAVE_EPOLL)
	return (eloop->poll_fd = epoll_create1(EPOLL_CLOEXEC));
#endif
}
int
eloop_requeue(struct eloop *eloop)
{
	struct eloop_event *e;
	int error;
#if defined(HAVE_KQUEUE)
	size_t i;
	struct kevent *ke;
#elif defined(HAVE_EPOLL)
	struct epoll_event epe;
#endif

	assert(eloop != NULL);

	if (eloop->poll_fd != -1)
		close(eloop->poll_fd);
	if (eloop_open(eloop) == -1)
		return -1;
#if defined (HAVE_KQUEUE)
	i = eloop->signals_len;
	TAILQ_FOREACH(e, &eloop->events, next) {
		i++;
		if (e->write_cb)
			i++;
	}

	if ((ke = malloc(sizeof(*ke) * i)) == NULL)
		return -1;
	for (i = 0; i < eloop->signals_len; i++)
		EV_SET(&ke[i], (uintptr_t)eloop->signals[i],
		    EVFILT_SIGNAL, EV_ADD, 0, 0, UPTR(NULL));
	TAILQ_FOREACH(e, &eloop->events, next) {
		EV_SET(&ke[i], (uintptr_t)e->fd, EVFILT_READ,
		    EV_ADD, 0, 0, UPTR(e));
		i++;
		if (e->write_cb) {
			EV_SET(&ke[i], (uintptr_t)e->fd, EVFILT_WRITE,
			    EV_ADD, 0, 0, UPTR(e));
			i++;
		}
	}
	error = kevent(eloop->poll_fd, ke, LENC(i), NULL, 0, NULL);
	free(ke);
#elif defined(HAVE_EPOLL)
	error = 0;

	TAILQ_FOREACH(e, &eloop->events, next) {
		memset(&epe, 0, sizeof(epe));
		epe.data.fd = e->fd;
		epe.events = EPOLLIN;
		if (e->write_cb)
			epe.events |= EPOLLOUT;
		epe.data.ptr = e;
		if (epoll_ctl(eloop->poll_fd, EPOLL_CTL_ADD, e->fd, &epe) == -1)
			error = -1;
	}
#endif

	return error;
}
#endif
int
eloop_signal_set_cb(struct eloop *eloop,
    const int *signals, size_t signals_len,
    void (*signal_cb)(int, void *), void *signal_cb_ctx)
{

	assert(eloop != NULL);

	eloop->signals = signals;
	eloop->signals_len = signals_len;
	eloop->signal_cb = signal_cb;
	eloop->signal_cb_ctx = signal_cb_ctx;
	return eloop_requeue(eloop);
}
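
/*
 * Example usage (handle_signal, sigs and ctx are hypothetical):
 *
 *	static const int sigs[] = { SIGTERM, SIGINT };
 *
 *	eloop_signal_set_cb(eloop, sigs, __arraycount(sigs),
 *	    handle_signal, ctx);
 *	eloop_signal_mask(eloop, &oldset);
 */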
struct eloop_siginfo {
	int sig;
	struct eloop *eloop;
};
static struct eloop_siginfo _eloop_siginfo;
static struct eloop *_eloop;
static void
eloop_signal1(void *arg)
{
	struct eloop_siginfo *si = arg;

	si->eloop->signal_cb(si->sig, si->eloop->signal_cb_ctx);
}
static void
#if !defined(__minix)
eloop_signal3(int sig, __unused siginfo_t *siginfo, __unused void *arg)
#else /* defined(__minix) */
eloop_signal3(int sig)
#endif /* defined(__minix) */
{
	/* So that we can operate safely under a signal we instruct
	 * eloop to pass a copy of the siginfo structure to eloop_signal1
	 * as the very first thing to do. */
	_eloop_siginfo.eloop = _eloop;
	_eloop_siginfo.sig = sig;
	eloop_timeout_add_now(_eloop_siginfo.eloop,
	    eloop_signal1, &_eloop_siginfo);
}
int
eloop_signal_mask(struct eloop *eloop, sigset_t *oldset)
{
	sigset_t newset;
	size_t i;
	struct sigaction sa;

	assert(eloop != NULL);

	_eloop = eloop;

	sigfillset(&newset);
	if (sigprocmask(SIG_SETMASK, &newset, oldset) == -1)
		return -1;
	memset(&sa, 0, sizeof(sa));
#if !defined(__minix)
	sa.sa_sigaction = eloop_signal3;
	sa.sa_flags = SA_SIGINFO;
#else /* defined(__minix) */
	sa.sa_handler = eloop_signal3;
#endif /* defined(__minix) */
	sigemptyset(&sa.sa_mask);
	for (i = 0; i < eloop->signals_len; i++) {
		if (sigaction(eloop->signals[i], &sa, NULL) == -1)
			return -1;
	}
	return 0;
}
struct eloop *
eloop_new(void)
{
	struct eloop *eloop;
	struct timespec now;

	/* Check we have a working monotonic clock. */
	if (clock_gettime(CLOCK_MONOTONIC, &now) == -1)
		return NULL;

	eloop = calloc(1, sizeof(*eloop));
	if (eloop) {
		TAILQ_INIT(&eloop->events);
		TAILQ_INIT(&eloop->free_events);
		TAILQ_INIT(&eloop->timeouts);
		TAILQ_INIT(&eloop->free_timeouts);
		eloop->exitcode = EXIT_FAILURE;
#if defined(HAVE_KQUEUE) || defined(HAVE_EPOLL)
		eloop->poll_fd = -1;
		eloop_open(eloop);
#endif
	}

	return eloop;
}
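
/*
 * Typical lifecycle (sketch; handle_read, handle_timeout and ctx are
 * hypothetical):
 *
 *	struct eloop *eloop = eloop_new();
 *
 *	eloop_event_add(eloop, fd, handle_read, ctx, NULL, NULL);
 *	eloop_q_timeout_add_sec(eloop, 0, 10, handle_timeout, ctx);
 *	exit_code = eloop_start(eloop, &sigset);
 *	eloop_free(eloop);
 */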
void eloop_free(struct eloop *eloop)
{
	struct eloop_event *e;
	struct eloop_timeout *t;

	if (eloop == NULL)
		return;
	while ((e = TAILQ_FIRST(&eloop->events))) {
		TAILQ_REMOVE(&eloop->events, e, next);
		free(e);
	}
	while ((e = TAILQ_FIRST(&eloop->free_events))) {
		TAILQ_REMOVE(&eloop->free_events, e, next);
		free(e);
	}
	while ((t = TAILQ_FIRST(&eloop->timeouts))) {
		TAILQ_REMOVE(&eloop->timeouts, t, next);
		free(t);
	}
	while ((t = TAILQ_FIRST(&eloop->free_timeouts))) {
		TAILQ_REMOVE(&eloop->free_timeouts, t, next);
		free(t);
	}
#if defined(HAVE_KQUEUE) || defined(HAVE_EPOLL)
	close(eloop->poll_fd);
#else
	free(eloop->fds);
#endif
	free(eloop);
}
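
/*
 * Note the two free pools: deleted events and timeouts are parked on
 * free_events/free_timeouts for reuse, so a running loop rarely
 * allocates; this is the only place the pooled entries are released.
 */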
int
eloop_start(struct eloop *eloop, sigset_t *signals)
{
	int n;
	struct eloop_event *e;
	struct eloop_timeout *t;
	struct timespec now, ts, *tsp;
	void (*t0)(void *);
#if defined(HAVE_KQUEUE)
	struct kevent ke;
	UNUSED(signals);
#elif defined(HAVE_EPOLL)
	struct epoll_event epe;
#endif
#if !defined(HAVE_KQUEUE)
	int timeout;
#endif

	assert(eloop != NULL);

	eloop->exitnow = 0;
	while (!eloop->exitnow) {
		/* Run all timeouts first */
		if (eloop->timeout0) {
			t0 = eloop->timeout0;
			eloop->timeout0 = NULL;
			t0(eloop->timeout0_arg);
			continue;
		}
		if ((t = TAILQ_FIRST(&eloop->timeouts))) {
			clock_gettime(CLOCK_MONOTONIC, &now);
			if (timespeccmp(&now, &t->when, >)) {
				TAILQ_REMOVE(&eloop->timeouts, t, next);
				t->callback(t->arg);
				TAILQ_INSERT_TAIL(&eloop->free_timeouts,
				    t, next);
				continue;
			}
			timespecsub(&t->when, &now, &ts);
			tsp = &ts;
		} else
			/* No timeouts, so wait forever */
			tsp = NULL;

		if (tsp == NULL && eloop->events_len == 0)
			break;
#if !defined(HAVE_KQUEUE)
		if (tsp == NULL)
			timeout = -1;
		else if (tsp->tv_sec > INT_MAX / 1000 ||
		    (tsp->tv_sec == INT_MAX / 1000 &&
		    (tsp->tv_nsec + 999999) / 1000000 > INT_MAX % 1000000))
			timeout = INT_MAX;
		else
			timeout = (int)(tsp->tv_sec * 1000 +
			    (tsp->tv_nsec + 999999) / 1000000);
#endif
#if defined(HAVE_KQUEUE)
		n = kevent(eloop->poll_fd, NULL, 0, &ke, 1, tsp);
#elif defined(HAVE_EPOLL)
		if (signals)
			n = epoll_pwait(eloop->poll_fd, &epe, 1,
			    timeout, signals);
		else
			n = epoll_wait(eloop->poll_fd, &epe, 1, timeout);
#else
		if (signals)
			n = pollts(eloop->fds, (nfds_t)eloop->events_len,
			    tsp, signals);
		else
			n = poll(eloop->fds, (nfds_t)eloop->events_len,
			    timeout);
#endif
		if (n == -1) {
			if (errno == EINTR)
				continue;
			return -1;
		}
		/* Process any triggered events.
		 * We go back to the start after calling each callback in case
		 * the current event or next event is removed. */
#if defined(HAVE_KQUEUE)
		if (n) {
			if (ke.filter == EVFILT_SIGNAL) {
				eloop->signal_cb((int)ke.ident,
				    eloop->signal_cb_ctx);
				continue;
			}
			e = (struct eloop_event *)ke.udata;
			if (ke.filter == EVFILT_WRITE) {
				e->write_cb(e->write_cb_arg);
				continue;
			} else if (ke.filter == EVFILT_READ) {
				e->read_cb(e->read_cb_arg);
				continue;
			}
		}
#elif defined(HAVE_EPOLL)
		if (n) {
			e = (struct eloop_event *)epe.data.ptr;
			if (epe.events & EPOLLOUT && e->write_cb != NULL) {
				e->write_cb(e->write_cb_arg);
				continue;
			}
			if (epe.events &
			    (EPOLLIN | EPOLLERR | EPOLLHUP) &&
			    e->read_cb != NULL)
			{
				e->read_cb(e->read_cb_arg);
				continue;
			}
		}
#else
		if (n > 0) {
			TAILQ_FOREACH(e, &eloop->events, next) {
				if (e->pollfd->revents & POLLOUT &&
				    e->write_cb != NULL)
				{
					e->write_cb(e->write_cb_arg);
					break;
				}
				if (e->pollfd->revents && e->read_cb != NULL) {
					e->read_cb(e->read_cb_arg);
					break;
				}
			}
		}
#endif
	}

	return eloop->exitcode;
}