/*
   Unix SMB/CIFS implementation.
   main select loop and event handling
   Copyright (C) Andrew Tridgell 2003-2005
   Copyright (C) Stefan Metzmacher 2005-2009

     ** NOTE! The following LGPL license applies to the tevent
     ** library. This does NOT imply that all of Samba is released
     ** under the LGPL

   This library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 3 of the License, or (at your option) any later version.

   This library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with this library; if not, see <http://www.gnu.org/licenses/>.
*/
#include "replace.h"
#include "system/filesys.h"
#include "system/select.h"
#include "tevent.h"
#include "tevent_util.h"
#include "tevent_internal.h"
/*
 * Per-context private state of the poll backend,
 * hung off ev->additional_data.
 */
struct poll_event_context {
	/* a pointer back to the generic event_context */
	struct tevent_context *ev;

	/*
	 * one or more events were deleted or disabled
	 */
	bool deleted;

	/*
	 * These two arrays are maintained together.
	 *
	 * The following is always true:
	 * num_fds <= num_fdes
	 *
	 * new 'fresh' elements are added at the end
	 * of the 'fdes' array and picked up later
	 * to the 'fds' array in poll_event_sync_arrays()
	 * before the poll() syscall.
	 */
	size_t num_fds;
	struct pollfd *fds;
	size_t num_fdes;
	struct tevent_fd **fdes;

	/*
	 * use tevent_common_wakeup(ev) to wake the poll() thread
	 */
	bool use_mt_mode;
};
64 create a poll_event_context structure.
66 static int poll_event_context_init(struct tevent_context
*ev
)
68 struct poll_event_context
*poll_ev
;
71 * we might be called during tevent_re_initialise()
72 * which means we need to free our old additional_data
73 * in order to detach old fd events from the
76 TALLOC_FREE(ev
->additional_data
);
78 poll_ev
= talloc_zero(ev
, struct poll_event_context
);
79 if (poll_ev
== NULL
) {
83 ev
->additional_data
= poll_ev
;
87 static int poll_event_context_init_mt(struct tevent_context
*ev
)
89 struct poll_event_context
*poll_ev
;
92 ret
= poll_event_context_init(ev
);
97 poll_ev
= talloc_get_type_abort(
98 ev
->additional_data
, struct poll_event_context
);
100 ret
= tevent_common_wakeup_init(ev
);
105 poll_ev
->use_mt_mode
= true;
110 static void poll_event_wake_pollthread(struct poll_event_context
*poll_ev
)
112 if (!poll_ev
->use_mt_mode
) {
115 tevent_common_wakeup(poll_ev
->ev
);
121 static int poll_event_fd_destructor(struct tevent_fd
*fde
)
123 struct tevent_context
*ev
= fde
->event_ctx
;
124 struct poll_event_context
*poll_ev
;
125 uint64_t del_idx
= fde
->additional_flags
;
131 poll_ev
= talloc_get_type_abort(
132 ev
->additional_data
, struct poll_event_context
);
134 if (del_idx
== UINT64_MAX
) {
138 poll_ev
->fdes
[del_idx
] = NULL
;
139 poll_ev
->deleted
= true;
140 poll_event_wake_pollthread(poll_ev
);
142 return tevent_common_fd_destructor(fde
);
145 static void poll_event_schedule_immediate(struct tevent_immediate
*im
,
146 struct tevent_context
*ev
,
147 tevent_immediate_handler_t handler
,
149 const char *handler_name
,
150 const char *location
)
152 struct poll_event_context
*poll_ev
= talloc_get_type_abort(
153 ev
->additional_data
, struct poll_event_context
);
155 tevent_common_schedule_immediate(im
, ev
, handler
, private_data
,
156 handler_name
, location
);
157 poll_event_wake_pollthread(poll_ev
);
161 Private function called by "standard" backend fallback.
162 Note this only allows fallback to "poll" backend, not "poll-mt".
164 _PRIVATE_
bool tevent_poll_event_add_fd_internal(struct tevent_context
*ev
,
165 struct tevent_fd
*fde
)
167 struct poll_event_context
*poll_ev
= talloc_get_type_abort(
168 ev
->additional_data
, struct poll_event_context
);
169 uint64_t fde_idx
= UINT64_MAX
;
172 fde
->additional_flags
= UINT64_MAX
;
173 tevent_common_fd_mpx_reinit(fde
);
174 talloc_set_destructor(fde
, poll_event_fd_destructor
);
176 if (fde
->flags
== 0) {
178 * Nothing more to do...
184 * We need to add it to the end of the 'fdes' array.
186 num_fdes
= poll_ev
->num_fdes
+ 1;
187 if (num_fdes
> talloc_array_length(poll_ev
->fdes
)) {
188 struct tevent_fd
**tmp_fdes
= NULL
;
191 array_length
= (num_fdes
+ 15) & ~15; /* round up to 16 */
193 tmp_fdes
= talloc_realloc(poll_ev
,
197 if (tmp_fdes
== NULL
) {
200 poll_ev
->fdes
= tmp_fdes
;
203 fde_idx
= poll_ev
->num_fdes
;
204 fde
->additional_flags
= fde_idx
;
205 poll_ev
->fdes
[fde_idx
] = fde
;
213 return NULL on failure (memory allocation error)
215 static struct tevent_fd
*poll_event_add_fd(struct tevent_context
*ev
,
217 int fd
, uint16_t flags
,
218 tevent_fd_handler_t handler
,
220 const char *handler_name
,
221 const char *location
)
223 struct poll_event_context
*poll_ev
= talloc_get_type_abort(
224 ev
->additional_data
, struct poll_event_context
);
225 struct tevent_fd
*fde
;
232 fde
= tevent_common_add_fd(ev
,
244 ok
= tevent_poll_event_add_fd_internal(ev
, fde
);
249 poll_event_wake_pollthread(poll_ev
);
252 * poll_event_loop_poll will take care of the rest in
253 * poll_event_setup_fresh
259 map from TEVENT_FD_* to POLLIN/POLLOUT
261 static uint16_t poll_map_flags(uint16_t flags
)
263 uint16_t pollflags
= 0;
266 * we do not need to specify POLLERR | POLLHUP
267 * they are always reported.
270 if (flags
& TEVENT_FD_READ
) {
274 * Note that at least on Linux
275 * POLLRDHUP always returns
276 * POLLIN in addition, so this
277 * is not strictly needed, but
278 * we want to make it explicit.
280 pollflags
|= POLLRDHUP
;
283 if (flags
& TEVENT_FD_WRITE
) {
284 pollflags
|= POLLOUT
;
286 if (flags
& TEVENT_FD_ERROR
) {
288 pollflags
|= POLLRDHUP
;
296 set the fd event flags
298 static void poll_event_set_fd_flags(struct tevent_fd
*fde
, uint16_t flags
)
300 struct tevent_context
*ev
= fde
->event_ctx
;
301 struct poll_event_context
*poll_ev
;
302 uint64_t idx
= fde
->additional_flags
;
308 if (fde
->flags
== flags
) {
312 poll_ev
= talloc_get_type_abort(
313 ev
->additional_data
, struct poll_event_context
);
317 if (idx
== UINT64_MAX
) {
319 * We move it between the fresh and disabled lists.
321 tevent_poll_event_add_fd_internal(ev
, fde
);
322 poll_event_wake_pollthread(poll_ev
);
326 if (fde
->flags
== 0) {
328 * We need to remove it from the array
329 * and move it to the disabled list.
331 poll_ev
->fdes
[idx
] = NULL
;
332 poll_ev
->deleted
= true;
333 fde
->additional_flags
= UINT64_MAX
;
334 poll_event_wake_pollthread(poll_ev
);
338 if (idx
>= poll_ev
->num_fds
) {
340 * Not yet added to the
341 * poll_ev->fds array.
343 poll_event_wake_pollthread(poll_ev
);
347 poll_ev
->fds
[idx
].events
= poll_map_flags(flags
);
349 poll_event_wake_pollthread(poll_ev
);
352 static bool poll_event_sync_arrays(struct tevent_context
*ev
,
353 struct poll_event_context
*poll_ev
)
358 if (poll_ev
->deleted
) {
360 for (i
=0; i
< poll_ev
->num_fds
;) {
361 struct tevent_fd
*fde
= poll_ev
->fdes
[i
];
370 * This fde was talloc_free()'ed. Delete it
373 poll_ev
->num_fds
-= 1;
374 ci
= poll_ev
->num_fds
;
376 poll_ev
->fds
[i
] = poll_ev
->fds
[ci
];
377 poll_ev
->fdes
[i
] = poll_ev
->fdes
[ci
];
378 if (poll_ev
->fdes
[i
] != NULL
) {
379 poll_ev
->fdes
[i
]->additional_flags
= i
;
382 poll_ev
->fds
[ci
] = (struct pollfd
) { .fd
= -1 };
383 poll_ev
->fdes
[ci
] = NULL
;
385 poll_ev
->deleted
= false;
388 if (poll_ev
->num_fds
== poll_ev
->num_fdes
) {
393 * Recheck the size of both arrays and make sure
394 * poll_fd->fds array has at least the size of the
395 * in use poll_ev->fdes array.
397 if (poll_ev
->num_fdes
> talloc_array_length(poll_ev
->fds
)) {
398 struct pollfd
*tmp_fds
= NULL
;
401 * Make sure both allocated the same length.
403 array_length
= talloc_array_length(poll_ev
->fdes
);
405 tmp_fds
= talloc_realloc(poll_ev
,
409 if (tmp_fds
== NULL
) {
412 poll_ev
->fds
= tmp_fds
;
416 * Now setup the new elements.
418 for (i
= poll_ev
->num_fds
; i
< poll_ev
->num_fdes
; i
++) {
419 struct tevent_fd
*fde
= poll_ev
->fdes
[i
];
420 struct pollfd
*pfd
= &poll_ev
->fds
[poll_ev
->num_fds
];
426 if (i
> poll_ev
->num_fds
) {
427 poll_ev
->fdes
[poll_ev
->num_fds
] = fde
;
428 fde
->additional_flags
= poll_ev
->num_fds
;
429 poll_ev
->fdes
[i
] = NULL
;
433 pfd
->events
= poll_map_flags(fde
->flags
);
436 poll_ev
->num_fds
+= 1;
438 /* Both are in sync again */
439 poll_ev
->num_fdes
= poll_ev
->num_fds
;
442 * Check if we should shrink the arrays
443 * But keep at least 16 elements.
446 array_length
= (poll_ev
->num_fds
+ 15) & ~15; /* round up to 16 */
447 array_length
= MAX(array_length
, 16);
448 if (array_length
< talloc_array_length(poll_ev
->fdes
)) {
449 struct tevent_fd
**tmp_fdes
= NULL
;
450 struct pollfd
*tmp_fds
= NULL
;
452 tmp_fdes
= talloc_realloc(poll_ev
,
456 if (tmp_fdes
== NULL
) {
459 poll_ev
->fdes
= tmp_fdes
;
461 tmp_fds
= talloc_realloc(poll_ev
,
465 if (tmp_fds
== NULL
) {
468 poll_ev
->fds
= tmp_fds
;
475 event loop handling using poll()
477 static int poll_event_loop_poll(struct tevent_context
*ev
,
478 struct timeval
*tvalp
)
480 struct poll_event_context
*poll_ev
= talloc_get_type_abort(
481 ev
->additional_data
, struct poll_event_context
);
485 struct tevent_fd
*fde
= NULL
;
486 struct tevent_fd
*next
= NULL
;
490 if (ev
->signal_events
&& tevent_common_check_signal(ev
)) {
495 timeout
= tvalp
->tv_sec
* 1000;
496 timeout
+= (tvalp
->tv_usec
+ 999) / 1000;
499 ok
= poll_event_sync_arrays(ev
, poll_ev
);
504 tevent_trace_point_callback(poll_ev
->ev
, TEVENT_TRACE_BEFORE_WAIT
);
505 pollrtn
= poll(poll_ev
->fds
, poll_ev
->num_fds
, timeout
);
507 tevent_trace_point_callback(poll_ev
->ev
, TEVENT_TRACE_AFTER_WAIT
);
509 if (pollrtn
== -1 && poll_errno
== EINTR
&& ev
->signal_events
) {
510 tevent_common_check_signal(ev
);
514 if (pollrtn
== 0 && tvalp
) {
515 /* we don't care about a possible delay here */
516 tevent_common_loop_timer_delay(ev
);
527 /* at least one file descriptor is ready - check
528 which ones and call the handler, being careful to allow
529 the handler to remove itself when called */
531 for (fde
= ev
->fd_events
; fde
; fde
= next
) {
532 uint64_t idx
= fde
->additional_flags
;
538 if (idx
== UINT64_MAX
) {
542 pfd
= &poll_ev
->fds
[idx
];
544 if (pfd
->revents
& POLLNVAL
) {
546 * the socket is dead! this should never
547 * happen as the socket should have first been
548 * made readable and that should have removed
549 * the event, so this must be a bug.
551 * We ignore it here to match the epoll
554 tevent_debug(ev
, TEVENT_DEBUG_ERROR
,
555 "POLLNVAL on fde[%p] fd[%d] - disabling\n",
557 poll_ev
->fdes
[idx
] = NULL
;
558 poll_ev
->deleted
= true;
559 tevent_common_fd_disarm(fde
);
564 #define __POLL_RETURN_ERROR_FLAGS (POLLHUP|POLLERR|POLLRDHUP)
566 #define __POLL_RETURN_ERROR_FLAGS (POLLHUP|POLLERR)
569 if (pfd
->revents
& __POLL_RETURN_ERROR_FLAGS
) {
571 * If we only wait for TEVENT_FD_WRITE, we
572 * should not tell the event handler about it,
573 * and remove the writable flag, as we only
574 * report errors when waiting for read events
575 * or explicit for errors.
577 if (!(fde
->flags
& (TEVENT_FD_READ
|TEVENT_FD_ERROR
)))
579 TEVENT_FD_NOT_WRITEABLE(fde
);
582 if (fde
->flags
& TEVENT_FD_ERROR
) {
583 flags
|= TEVENT_FD_ERROR
;
585 if (fde
->flags
& TEVENT_FD_READ
) {
586 flags
|= TEVENT_FD_READ
;
589 if (pfd
->revents
& POLLIN
) {
590 flags
|= TEVENT_FD_READ
;
592 if (pfd
->revents
& POLLOUT
) {
593 flags
|= TEVENT_FD_WRITE
;
596 * Note that fde->flags could be changed when using
597 * the poll_mt backend together with threads,
598 * that why we need to check pfd->revents and fde->flags
602 DLIST_DEMOTE(ev
->fd_events
, fde
);
603 return tevent_common_invoke_fd_handler(fde
, flags
, NULL
);
607 for (i
= 0; i
< poll_ev
->num_fds
; i
++) {
608 if (poll_ev
->fds
[i
].revents
& POLLNVAL
) {
610 * the socket is dead! this should never
611 * happen as the socket should have first been
612 * made readable and that should have removed
613 * the event, so this must be a bug or
614 * a race in the poll_mt usage.
616 fde
= poll_ev
->fdes
[i
];
617 tevent_debug(ev
, TEVENT_DEBUG_WARNING
,
618 "POLLNVAL on dangling fd[%d] fde[%p] - disabling\n",
619 poll_ev
->fds
[i
].fd
, fde
);
620 poll_ev
->fdes
[i
] = NULL
;
621 poll_ev
->deleted
= true;
623 tevent_common_fd_disarm(fde
);
632 do a single event loop using the events defined in ev
634 static int poll_event_loop_once(struct tevent_context
*ev
,
635 const char *location
)
639 if (ev
->signal_events
&&
640 tevent_common_check_signal(ev
)) {
644 if (ev
->threaded_contexts
!= NULL
) {
645 tevent_common_threaded_activate_immediate(ev
);
648 if (ev
->immediate_events
&&
649 tevent_common_loop_immediate(ev
)) {
653 tval
= tevent_common_loop_timer_delay(ev
);
654 if (tevent_timeval_is_zero(&tval
)) {
658 return poll_event_loop_poll(ev
, &tval
);
661 static const struct tevent_ops poll_event_ops
= {
662 .context_init
= poll_event_context_init
,
663 .add_fd
= poll_event_add_fd
,
664 .set_fd_close_fn
= tevent_common_fd_set_close_fn
,
665 .get_fd_flags
= tevent_common_fd_get_flags
,
666 .set_fd_flags
= poll_event_set_fd_flags
,
667 .add_timer
= tevent_common_add_timer_v2
,
668 .schedule_immediate
= tevent_common_schedule_immediate
,
669 .add_signal
= tevent_common_add_signal
,
670 .loop_once
= poll_event_loop_once
,
671 .loop_wait
= tevent_common_loop_wait
,
674 _PRIVATE_
bool tevent_poll_init(void)
676 return tevent_register_backend("poll", &poll_event_ops
);
679 static const struct tevent_ops poll_event_mt_ops
= {
680 .context_init
= poll_event_context_init_mt
,
681 .add_fd
= poll_event_add_fd
,
682 .set_fd_close_fn
= tevent_common_fd_set_close_fn
,
683 .get_fd_flags
= tevent_common_fd_get_flags
,
684 .set_fd_flags
= poll_event_set_fd_flags
,
685 .add_timer
= tevent_common_add_timer_v2
,
686 .schedule_immediate
= poll_event_schedule_immediate
,
687 .add_signal
= tevent_common_add_signal
,
688 .loop_once
= poll_event_loop_once
,
689 .loop_wait
= tevent_common_loop_wait
,
692 _PRIVATE_
bool tevent_poll_mt_init(void)
694 return tevent_register_backend("poll_mt", &poll_event_mt_ops
);