1 /* $NetBSD: bufferevent.c,v 1.3 2015/01/29 07:26:02 spz Exp $ */
3 * Copyright (c) 2002-2007 Niels Provos <provos@citi.umich.edu>
4 * Copyright (c) 2007-2012 Niels Provos, Nick Mathewson
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
9 * 1. Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
14 * 3. The name of the author may not be used to endorse or promote products
15 * derived from this software without specific prior written permission.
17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
18 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
19 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
20 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
21 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
22 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
26 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
29 #include <sys/types.h>
31 #include "event2/event-config.h"
32 #include <sys/cdefs.h>
33 __RCSID("$NetBSD: bufferevent.c,v 1.3 2015/01/29 07:26:02 spz Exp $");
35 #ifdef _EVENT_HAVE_SYS_TIME_H
43 #ifdef _EVENT_HAVE_STDARG_H
52 #include "event2/util.h"
53 #include "event2/buffer.h"
54 #include "event2/buffer_compat.h"
55 #include "event2/bufferevent.h"
56 #include "event2/bufferevent_struct.h"
57 #include "event2/bufferevent_compat.h"
58 #include "event2/event.h"
59 #include "log-internal.h"
60 #include "mm-internal.h"
61 #include "bufferevent-internal.h"
62 #include "evbuffer-internal.h"
63 #include "util-internal.h"
/* Forward declaration: cancel every pending backend request on a bufferevent
 * (used by bufferevent_free() before the final decref). */
static void _bufferevent_cancel_all(struct bufferevent *bev);
69 bufferevent_suspend_read(struct bufferevent
*bufev
, bufferevent_suspend_flags what
)
71 struct bufferevent_private
*bufev_private
=
72 EVUTIL_UPCAST(bufev
, struct bufferevent_private
, bev
);
74 if (!bufev_private
->read_suspended
)
75 bufev
->be_ops
->disable(bufev
, EV_READ
);
76 bufev_private
->read_suspended
|= what
;
81 bufferevent_unsuspend_read(struct bufferevent
*bufev
, bufferevent_suspend_flags what
)
83 struct bufferevent_private
*bufev_private
=
84 EVUTIL_UPCAST(bufev
, struct bufferevent_private
, bev
);
86 bufev_private
->read_suspended
&= ~what
;
87 if (!bufev_private
->read_suspended
&& (bufev
->enabled
& EV_READ
))
88 bufev
->be_ops
->enable(bufev
, EV_READ
);
93 bufferevent_suspend_write(struct bufferevent
*bufev
, bufferevent_suspend_flags what
)
95 struct bufferevent_private
*bufev_private
=
96 EVUTIL_UPCAST(bufev
, struct bufferevent_private
, bev
);
98 if (!bufev_private
->write_suspended
)
99 bufev
->be_ops
->disable(bufev
, EV_WRITE
);
100 bufev_private
->write_suspended
|= what
;
105 bufferevent_unsuspend_write(struct bufferevent
*bufev
, bufferevent_suspend_flags what
)
107 struct bufferevent_private
*bufev_private
=
108 EVUTIL_UPCAST(bufev
, struct bufferevent_private
, bev
);
110 bufev_private
->write_suspended
&= ~what
;
111 if (!bufev_private
->write_suspended
&& (bufev
->enabled
& EV_WRITE
))
112 bufev
->be_ops
->enable(bufev
, EV_WRITE
);
117 /* Callback to implement watermarks on the input buffer. Only enabled
118 * if the watermark is set. */
120 bufferevent_inbuf_wm_cb(struct evbuffer
*buf
,
121 const struct evbuffer_cb_info
*cbinfo
,
124 struct bufferevent
*bufev
= arg
;
127 size
= evbuffer_get_length(buf
);
129 if (size
>= bufev
->wm_read
.high
)
130 bufferevent_wm_suspend_read(bufev
);
132 bufferevent_wm_unsuspend_read(bufev
);
136 bufferevent_run_deferred_callbacks_locked(struct deferred_cb
*_
, void *arg
)
138 struct bufferevent_private
*bufev_private
= arg
;
139 struct bufferevent
*bufev
= &bufev_private
->bev
;
142 if ((bufev_private
->eventcb_pending
& BEV_EVENT_CONNECTED
) &&
144 /* The "connected" happened before any reads or writes, so
146 bufev_private
->eventcb_pending
&= ~BEV_EVENT_CONNECTED
;
147 bufev
->errorcb(bufev
, BEV_EVENT_CONNECTED
, bufev
->cbarg
);
149 if (bufev_private
->readcb_pending
&& bufev
->readcb
) {
150 bufev_private
->readcb_pending
= 0;
151 bufev
->readcb(bufev
, bufev
->cbarg
);
153 if (bufev_private
->writecb_pending
&& bufev
->writecb
) {
154 bufev_private
->writecb_pending
= 0;
155 bufev
->writecb(bufev
, bufev
->cbarg
);
157 if (bufev_private
->eventcb_pending
&& bufev
->errorcb
) {
158 short what
= bufev_private
->eventcb_pending
;
159 int err
= bufev_private
->errno_pending
;
160 bufev_private
->eventcb_pending
= 0;
161 bufev_private
->errno_pending
= 0;
162 EVUTIL_SET_SOCKET_ERROR(err
);
163 bufev
->errorcb(bufev
, what
, bufev
->cbarg
);
165 _bufferevent_decref_and_unlock(bufev
);
169 bufferevent_run_deferred_callbacks_unlocked(struct deferred_cb
*_
, void *arg
)
171 struct bufferevent_private
*bufev_private
= arg
;
172 struct bufferevent
*bufev
= &bufev_private
->bev
;
175 #define UNLOCKED(stmt) \
176 do { BEV_UNLOCK(bufev); stmt; BEV_LOCK(bufev); } while(0)
178 if ((bufev_private
->eventcb_pending
& BEV_EVENT_CONNECTED
) &&
180 /* The "connected" happened before any reads or writes, so
182 bufferevent_event_cb errorcb
= bufev
->errorcb
;
183 void *cbarg
= bufev
->cbarg
;
184 bufev_private
->eventcb_pending
&= ~BEV_EVENT_CONNECTED
;
185 UNLOCKED(errorcb(bufev
, BEV_EVENT_CONNECTED
, cbarg
));
187 if (bufev_private
->readcb_pending
&& bufev
->readcb
) {
188 bufferevent_data_cb readcb
= bufev
->readcb
;
189 void *cbarg
= bufev
->cbarg
;
190 bufev_private
->readcb_pending
= 0;
191 UNLOCKED(readcb(bufev
, cbarg
));
193 if (bufev_private
->writecb_pending
&& bufev
->writecb
) {
194 bufferevent_data_cb writecb
= bufev
->writecb
;
195 void *cbarg
= bufev
->cbarg
;
196 bufev_private
->writecb_pending
= 0;
197 UNLOCKED(writecb(bufev
, cbarg
));
199 if (bufev_private
->eventcb_pending
&& bufev
->errorcb
) {
200 bufferevent_event_cb errorcb
= bufev
->errorcb
;
201 void *cbarg
= bufev
->cbarg
;
202 short what
= bufev_private
->eventcb_pending
;
203 int err
= bufev_private
->errno_pending
;
204 bufev_private
->eventcb_pending
= 0;
205 bufev_private
->errno_pending
= 0;
206 EVUTIL_SET_SOCKET_ERROR(err
);
207 UNLOCKED(errorcb(bufev
,what
,cbarg
));
209 _bufferevent_decref_and_unlock(bufev
);
/* Queue the bufferevent's deferred_cb on its base's deferred queue, taking a
 * reference that the deferred-callback driver releases when it runs. */
#define SCHEDULE_DEFERRED(bevp)						\
	do {								\
		bufferevent_incref(&(bevp)->bev);			\
		event_deferred_cb_schedule(				\
			event_base_get_deferred_cb_queue((bevp)->bev.ev_base), \
			&(bevp)->deferred);				\
	} while (/*CONSTCOND*/0)
223 _bufferevent_run_readcb(struct bufferevent
*bufev
)
225 /* Requires that we hold the lock and a reference */
226 struct bufferevent_private
*p
=
227 EVUTIL_UPCAST(bufev
, struct bufferevent_private
, bev
);
228 if (bufev
->readcb
== NULL
)
230 if (p
->options
& BEV_OPT_DEFER_CALLBACKS
) {
231 p
->readcb_pending
= 1;
232 if (!p
->deferred
.queued
)
233 SCHEDULE_DEFERRED(p
);
235 bufev
->readcb(bufev
, bufev
->cbarg
);
240 _bufferevent_run_writecb(struct bufferevent
*bufev
)
242 /* Requires that we hold the lock and a reference */
243 struct bufferevent_private
*p
=
244 EVUTIL_UPCAST(bufev
, struct bufferevent_private
, bev
);
245 if (bufev
->writecb
== NULL
)
247 if (p
->options
& BEV_OPT_DEFER_CALLBACKS
) {
248 p
->writecb_pending
= 1;
249 if (!p
->deferred
.queued
)
250 SCHEDULE_DEFERRED(p
);
252 bufev
->writecb(bufev
, bufev
->cbarg
);
257 _bufferevent_run_eventcb(struct bufferevent
*bufev
, short what
)
259 /* Requires that we hold the lock and a reference */
260 struct bufferevent_private
*p
=
261 EVUTIL_UPCAST(bufev
, struct bufferevent_private
, bev
);
262 if (bufev
->errorcb
== NULL
)
264 if (p
->options
& BEV_OPT_DEFER_CALLBACKS
) {
265 p
->eventcb_pending
|= what
;
266 p
->errno_pending
= EVUTIL_SOCKET_ERROR();
267 if (!p
->deferred
.queued
)
268 SCHEDULE_DEFERRED(p
);
270 bufev
->errorcb(bufev
, what
, bufev
->cbarg
);
275 bufferevent_init_common(struct bufferevent_private
*bufev_private
,
276 struct event_base
*base
,
277 const struct bufferevent_ops
*ops
,
278 enum bufferevent_options options
)
280 struct bufferevent
*bufev
= &bufev_private
->bev
;
283 if ((bufev
->input
= evbuffer_new()) == NULL
)
287 if (!bufev
->output
) {
288 if ((bufev
->output
= evbuffer_new()) == NULL
) {
289 evbuffer_free(bufev
->input
);
294 bufev_private
->refcnt
= 1;
295 bufev
->ev_base
= base
;
297 /* Disable timeouts. */
298 evutil_timerclear(&bufev
->timeout_read
);
299 evutil_timerclear(&bufev
->timeout_write
);
304 * Set to EV_WRITE so that using bufferevent_write is going to
305 * trigger a callback. Reading needs to be explicitly enabled
306 * because otherwise no data will be available.
308 bufev
->enabled
= EV_WRITE
;
310 #ifndef _EVENT_DISABLE_THREAD_SUPPORT
311 if (options
& BEV_OPT_THREADSAFE
) {
312 if (bufferevent_enable_locking(bufev
, NULL
) < 0) {
314 evbuffer_free(bufev
->input
);
315 evbuffer_free(bufev
->output
);
317 bufev
->output
= NULL
;
322 if ((options
& (BEV_OPT_DEFER_CALLBACKS
|BEV_OPT_UNLOCK_CALLBACKS
))
323 == BEV_OPT_UNLOCK_CALLBACKS
) {
324 event_warnx("UNLOCK_CALLBACKS requires DEFER_CALLBACKS");
327 if (options
& BEV_OPT_DEFER_CALLBACKS
) {
328 if (options
& BEV_OPT_UNLOCK_CALLBACKS
)
329 event_deferred_cb_init(&bufev_private
->deferred
,
330 bufferevent_run_deferred_callbacks_unlocked
,
333 event_deferred_cb_init(&bufev_private
->deferred
,
334 bufferevent_run_deferred_callbacks_locked
,
338 bufev_private
->options
= options
;
340 evbuffer_set_parent(bufev
->input
, bufev
);
341 evbuffer_set_parent(bufev
->output
, bufev
);
347 bufferevent_setcb(struct bufferevent
*bufev
,
348 bufferevent_data_cb readcb
, bufferevent_data_cb writecb
,
349 bufferevent_event_cb eventcb
, void *cbarg
)
353 bufev
->readcb
= readcb
;
354 bufev
->writecb
= writecb
;
355 bufev
->errorcb
= eventcb
;
357 bufev
->cbarg
= cbarg
;
362 bufferevent_get_input(struct bufferevent
*bufev
)
368 bufferevent_get_output(struct bufferevent
*bufev
)
370 return bufev
->output
;
374 bufferevent_get_base(struct bufferevent
*bufev
)
376 return bufev
->ev_base
;
380 bufferevent_write(struct bufferevent
*bufev
, const void *data
, size_t size
)
382 if (evbuffer_add(bufev
->output
, data
, size
) == -1)
389 bufferevent_write_buffer(struct bufferevent
*bufev
, struct evbuffer
*buf
)
391 if (evbuffer_add_buffer(bufev
->output
, buf
) == -1)
398 bufferevent_read(struct bufferevent
*bufev
, void *data
, size_t size
)
400 return (evbuffer_remove(bufev
->input
, data
, size
));
404 bufferevent_read_buffer(struct bufferevent
*bufev
, struct evbuffer
*buf
)
406 return (evbuffer_add_buffer(buf
, bufev
->input
));
410 bufferevent_enable(struct bufferevent
*bufev
, short event
)
412 struct bufferevent_private
*bufev_private
=
413 EVUTIL_UPCAST(bufev
, struct bufferevent_private
, bev
);
414 short impl_events
= event
;
417 _bufferevent_incref_and_lock(bufev
);
418 if (bufev_private
->read_suspended
)
419 impl_events
&= ~EV_READ
;
420 if (bufev_private
->write_suspended
)
421 impl_events
&= ~EV_WRITE
;
423 bufev
->enabled
|= event
;
425 if (impl_events
&& bufev
->be_ops
->enable(bufev
, impl_events
) < 0)
428 _bufferevent_decref_and_unlock(bufev
);
433 bufferevent_set_timeouts(struct bufferevent
*bufev
,
434 const struct timeval
*tv_read
,
435 const struct timeval
*tv_write
)
440 bufev
->timeout_read
= *tv_read
;
442 evutil_timerclear(&bufev
->timeout_read
);
445 bufev
->timeout_write
= *tv_write
;
447 evutil_timerclear(&bufev
->timeout_write
);
450 if (bufev
->be_ops
->adj_timeouts
)
451 r
= bufev
->be_ops
->adj_timeouts(bufev
);
458 /* Obsolete; use bufferevent_set_timeouts */
460 bufferevent_settimeout(struct bufferevent
*bufev
,
461 int timeout_read
, int timeout_write
)
463 struct timeval tv_read
, tv_write
;
464 struct timeval
*ptv_read
= NULL
, *ptv_write
= NULL
;
466 memset(&tv_read
, 0, sizeof(tv_read
));
467 memset(&tv_write
, 0, sizeof(tv_write
));
470 tv_read
.tv_sec
= timeout_read
;
474 tv_write
.tv_sec
= timeout_write
;
475 ptv_write
= &tv_write
;
478 bufferevent_set_timeouts(bufev
, ptv_read
, ptv_write
);
483 bufferevent_disable_hard(struct bufferevent
*bufev
, short event
)
486 struct bufferevent_private
*bufev_private
=
487 EVUTIL_UPCAST(bufev
, struct bufferevent_private
, bev
);
490 bufev
->enabled
&= ~event
;
492 bufev_private
->connecting
= 0;
493 if (bufev
->be_ops
->disable(bufev
, event
) < 0)
501 bufferevent_disable(struct bufferevent
*bufev
, short event
)
506 bufev
->enabled
&= ~event
;
508 if (bufev
->be_ops
->disable(bufev
, event
) < 0)
516 * Sets the water marks
520 bufferevent_setwatermark(struct bufferevent
*bufev
, short events
,
521 size_t lowmark
, size_t highmark
)
523 struct bufferevent_private
*bufev_private
=
524 EVUTIL_UPCAST(bufev
, struct bufferevent_private
, bev
);
527 if (events
& EV_WRITE
) {
528 bufev
->wm_write
.low
= lowmark
;
529 bufev
->wm_write
.high
= highmark
;
532 if (events
& EV_READ
) {
533 bufev
->wm_read
.low
= lowmark
;
534 bufev
->wm_read
.high
= highmark
;
537 /* There is now a new high-water mark for read.
538 enable the callback if needed, and see if we should
539 suspend/bufferevent_wm_unsuspend. */
541 if (bufev_private
->read_watermarks_cb
== NULL
) {
542 bufev_private
->read_watermarks_cb
=
543 evbuffer_add_cb(bufev
->input
,
544 bufferevent_inbuf_wm_cb
,
547 evbuffer_cb_set_flags(bufev
->input
,
548 bufev_private
->read_watermarks_cb
,
549 EVBUFFER_CB_ENABLED
|EVBUFFER_CB_NODEFER
);
551 if (evbuffer_get_length(bufev
->input
) >= highmark
)
552 bufferevent_wm_suspend_read(bufev
);
553 else if (evbuffer_get_length(bufev
->input
) < highmark
)
554 bufferevent_wm_unsuspend_read(bufev
);
556 /* There is now no high-water mark for read. */
557 if (bufev_private
->read_watermarks_cb
)
558 evbuffer_cb_clear_flags(bufev
->input
,
559 bufev_private
->read_watermarks_cb
,
560 EVBUFFER_CB_ENABLED
);
561 bufferevent_wm_unsuspend_read(bufev
);
568 bufferevent_flush(struct bufferevent
*bufev
,
570 enum bufferevent_flush_mode mode
)
574 if (bufev
->be_ops
->flush
)
575 r
= bufev
->be_ops
->flush(bufev
, iotype
, mode
);
581 _bufferevent_incref_and_lock(struct bufferevent
*bufev
)
583 struct bufferevent_private
*bufev_private
=
586 ++bufev_private
->refcnt
;
591 _bufferevent_transfer_lock_ownership(struct bufferevent
*donor
,
592 struct bufferevent
*recipient
)
594 struct bufferevent_private
*d
= BEV_UPCAST(donor
);
595 struct bufferevent_private
*r
= BEV_UPCAST(recipient
);
596 if (d
->lock
!= r
->lock
)
608 _bufferevent_decref_and_unlock(struct bufferevent
*bufev
)
610 struct bufferevent_private
*bufev_private
=
611 EVUTIL_UPCAST(bufev
, struct bufferevent_private
, bev
);
612 struct bufferevent
*underlying
;
614 EVUTIL_ASSERT(bufev_private
->refcnt
> 0);
616 if (--bufev_private
->refcnt
) {
621 underlying
= bufferevent_get_underlying(bufev
);
623 /* Clean up the shared info */
624 if (bufev
->be_ops
->destruct
)
625 bufev
->be_ops
->destruct(bufev
);
627 /* XXX what happens if refcnt for these buffers is > 1?
628 * The buffers can share a lock with this bufferevent object,
629 * but the lock might be destroyed below. */
630 /* evbuffer will free the callbacks */
631 evbuffer_free(bufev
->input
);
632 evbuffer_free(bufev
->output
);
634 if (bufev_private
->rate_limiting
) {
635 if (bufev_private
->rate_limiting
->group
)
636 bufferevent_remove_from_rate_limit_group_internal(bufev
,0);
637 if (event_initialized(&bufev_private
->rate_limiting
->refill_bucket_event
))
638 event_del(&bufev_private
->rate_limiting
->refill_bucket_event
);
639 event_debug_unassign(&bufev_private
->rate_limiting
->refill_bucket_event
);
640 mm_free(bufev_private
->rate_limiting
);
641 bufev_private
->rate_limiting
= NULL
;
644 event_debug_unassign(&bufev
->ev_read
);
645 event_debug_unassign(&bufev
->ev_write
);
648 if (bufev_private
->own_lock
)
649 EVTHREAD_FREE_LOCK(bufev_private
->lock
,
650 EVTHREAD_LOCKTYPE_RECURSIVE
);
652 /* Free the actual allocated memory. */
653 mm_free(((char*)bufev
) - bufev
->be_ops
->mem_offset
);
655 /* Release the reference to underlying now that we no longer need the
656 * reference to it. We wait this long mainly in case our lock is
657 * shared with underlying.
659 * The 'destruct' function will also drop a reference to underlying
660 * if BEV_OPT_CLOSE_ON_FREE is set.
662 * XXX Should we/can we just refcount evbuffer/bufferevent locks?
663 * It would probably save us some headaches.
666 bufferevent_decref(underlying
);
/* Public decref: take the lock, then drop a reference (possibly freeing).
 * Returns 1 if the bufferevent was destroyed, 0 otherwise. */
int
bufferevent_decref(struct bufferevent *bufev)
{
	BEV_LOCK(bufev);
	return _bufferevent_decref_and_unlock(bufev);
}
679 bufferevent_free(struct bufferevent
*bufev
)
682 bufferevent_setcb(bufev
, NULL
, NULL
, NULL
, NULL
);
683 _bufferevent_cancel_all(bufev
);
684 _bufferevent_decref_and_unlock(bufev
);
688 bufferevent_incref(struct bufferevent
*bufev
)
690 struct bufferevent_private
*bufev_private
=
691 EVUTIL_UPCAST(bufev
, struct bufferevent_private
, bev
);
694 ++bufev_private
->refcnt
;
699 bufferevent_enable_locking(struct bufferevent
*bufev
, void *lock
)
701 #ifdef _EVENT_DISABLE_THREAD_SUPPORT
704 struct bufferevent
*underlying
;
706 if (BEV_UPCAST(bufev
)->lock
)
708 underlying
= bufferevent_get_underlying(bufev
);
710 if (!lock
&& underlying
&& BEV_UPCAST(underlying
)->lock
) {
711 lock
= BEV_UPCAST(underlying
)->lock
;
712 BEV_UPCAST(bufev
)->lock
= lock
;
713 BEV_UPCAST(bufev
)->own_lock
= 0;
715 EVTHREAD_ALLOC_LOCK(lock
, EVTHREAD_LOCKTYPE_RECURSIVE
);
718 BEV_UPCAST(bufev
)->lock
= lock
;
719 BEV_UPCAST(bufev
)->own_lock
= 1;
721 BEV_UPCAST(bufev
)->lock
= lock
;
722 BEV_UPCAST(bufev
)->own_lock
= 0;
724 evbuffer_enable_locking(bufev
->input
, lock
);
725 evbuffer_enable_locking(bufev
->output
, lock
);
727 if (underlying
&& !BEV_UPCAST(underlying
)->lock
)
728 bufferevent_enable_locking(underlying
, lock
);
735 bufferevent_setfd(struct bufferevent
*bev
, evutil_socket_t fd
)
737 union bufferevent_ctrl_data d
;
741 if (bev
->be_ops
->ctrl
)
742 res
= bev
->be_ops
->ctrl(bev
, BEV_CTRL_SET_FD
, &d
);
748 bufferevent_getfd(struct bufferevent
*bev
)
750 union bufferevent_ctrl_data d
;
754 if (bev
->be_ops
->ctrl
)
755 res
= bev
->be_ops
->ctrl(bev
, BEV_CTRL_GET_FD
, &d
);
757 return (res
<0) ? -1 : d
.fd
;
761 _bufferevent_cancel_all(struct bufferevent
*bev
)
763 union bufferevent_ctrl_data d
;
764 memset(&d
, 0, sizeof(d
));
766 if (bev
->be_ops
->ctrl
)
767 bev
->be_ops
->ctrl(bev
, BEV_CTRL_CANCEL_ALL
, &d
);
772 bufferevent_get_enabled(struct bufferevent
*bufev
)
782 bufferevent_get_underlying(struct bufferevent
*bev
)
784 union bufferevent_ctrl_data d
;
788 if (bev
->be_ops
->ctrl
)
789 res
= bev
->be_ops
->ctrl(bev
, BEV_CTRL_GET_UNDERLYING
, &d
);
791 return (res
<0) ? NULL
: d
.ptr
;
795 bufferevent_generic_read_timeout_cb(evutil_socket_t fd
, short event
, void *ctx
)
797 struct bufferevent
*bev
= ctx
;
798 _bufferevent_incref_and_lock(bev
);
799 bufferevent_disable(bev
, EV_READ
);
800 _bufferevent_run_eventcb(bev
, BEV_EVENT_TIMEOUT
|BEV_EVENT_READING
);
801 _bufferevent_decref_and_unlock(bev
);
804 bufferevent_generic_write_timeout_cb(evutil_socket_t fd
, short event
, void *ctx
)
806 struct bufferevent
*bev
= ctx
;
807 _bufferevent_incref_and_lock(bev
);
808 bufferevent_disable(bev
, EV_WRITE
);
809 _bufferevent_run_eventcb(bev
, BEV_EVENT_TIMEOUT
|BEV_EVENT_WRITING
);
810 _bufferevent_decref_and_unlock(bev
);
814 _bufferevent_init_generic_timeout_cbs(struct bufferevent
*bev
)
816 evtimer_assign(&bev
->ev_read
, bev
->ev_base
,
817 bufferevent_generic_read_timeout_cb
, bev
);
818 evtimer_assign(&bev
->ev_write
, bev
->ev_base
,
819 bufferevent_generic_write_timeout_cb
, bev
);
823 _bufferevent_del_generic_timeout_cbs(struct bufferevent
*bev
)
826 r1
= event_del(&bev
->ev_read
);
827 r2
= event_del(&bev
->ev_write
);
834 _bufferevent_generic_adj_timeouts(struct bufferevent
*bev
)
836 const short enabled
= bev
->enabled
;
837 struct bufferevent_private
*bev_p
=
838 EVUTIL_UPCAST(bev
, struct bufferevent_private
, bev
);
840 if ((enabled
& EV_READ
) && !bev_p
->read_suspended
&&
841 evutil_timerisset(&bev
->timeout_read
))
842 r1
= event_add(&bev
->ev_read
, &bev
->timeout_read
);
844 r1
= event_del(&bev
->ev_read
);
846 if ((enabled
& EV_WRITE
) && !bev_p
->write_suspended
&&
847 evutil_timerisset(&bev
->timeout_write
) &&
848 evbuffer_get_length(bev
->output
))
849 r2
= event_add(&bev
->ev_write
, &bev
->timeout_write
);
851 r2
= event_del(&bev
->ev_write
);
852 if (r1
< 0 || r2
< 0)
858 _bufferevent_add_event(struct event
*ev
, const struct timeval
*tv
)
860 if (tv
->tv_sec
== 0 && tv
->tv_usec
== 0)
861 return event_add(ev
, NULL
);
863 return event_add(ev
, tv
);
/* For use by user programs only; internally, we should be calling
 * either _bufferevent_incref_and_lock(), or BEV_LOCK. */
void
bufferevent_lock(struct bufferevent *bev)
{
	_bufferevent_incref_and_lock(bev);
}
/* User-facing counterpart of bufferevent_lock(). */
void
bufferevent_unlock(struct bufferevent *bev)
{
	_bufferevent_decref_and_unlock(bev);
}