/*
 * Copyright (c) 2002-2007 Niels Provos <provos@citi.umich.edu>
 * Copyright (c) 2007-2012 Niels Provos and Nick Mathewson
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include "event2/event-config.h"

#ifdef _EVENT_HAVE_VASPRINTF
/* If we have vasprintf, we need to define this before we include stdio.h. */
#define _GNU_SOURCE
#endif

#include <sys/types.h>

#ifdef _EVENT_HAVE_SYS_TIME_H
#include <sys/time.h>
#endif

#ifdef _EVENT_HAVE_SYS_SOCKET_H
#include <sys/socket.h>
#endif

#ifdef _EVENT_HAVE_SYS_UIO_H
#include <sys/uio.h>
#endif

#ifdef _EVENT_HAVE_SYS_IOCTL_H
#include <sys/ioctl.h>
#endif

#ifdef _EVENT_HAVE_SYS_MMAN_H
#include <sys/mman.h>
#endif

#ifdef _EVENT_HAVE_SYS_SENDFILE_H
#include <sys/sendfile.h>
#endif

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#ifdef _EVENT_HAVE_STDARG_H
#include <stdarg.h>
#endif
#ifdef _EVENT_HAVE_UNISTD_H
#include <unistd.h>
#endif
#include <limits.h>

#include "event2/event.h"
#include "event2/buffer.h"
#include "event2/buffer_compat.h"
#include "event2/bufferevent.h"
#include "event2/bufferevent_compat.h"
#include "event2/bufferevent_struct.h"
#include "event2/thread.h"
#include "event2/event-config.h"
#include "log-internal.h"
#include "mm-internal.h"
#include "util-internal.h"
#include "evthread-internal.h"
#include "evbuffer-internal.h"
#include "bufferevent-internal.h"
/* some systems do not have MAP_FAILED */
#ifndef MAP_FAILED
#define MAP_FAILED	((void *)-1)
#endif

/* send file support */
#if defined(_EVENT_HAVE_SYS_SENDFILE_H) && defined(_EVENT_HAVE_SENDFILE) && defined(__linux__)
#define USE_SENDFILE		1
#define SENDFILE_IS_LINUX	1
#elif defined(_EVENT_HAVE_SENDFILE) && defined(__FreeBSD__)
#define USE_SENDFILE		1
#define SENDFILE_IS_FREEBSD	1
#elif defined(_EVENT_HAVE_SENDFILE) && defined(__APPLE__)
#define USE_SENDFILE		1
#define SENDFILE_IS_MACOSX	1
#elif defined(_EVENT_HAVE_SENDFILE) && defined(__sun__) && defined(__svr4__)
#define USE_SENDFILE		1
#define SENDFILE_IS_SOLARIS	1
#endif

#ifdef USE_SENDFILE
static int use_sendfile = 1;
#endif
#ifdef _EVENT_HAVE_MMAP
static int use_mmap = 1;
#endif

/* Mask of user-selectable callback flags. */
#define EVBUFFER_CB_USER_FLAGS	    0xffff
/* Mask of all internal-use-only flags. */
#define EVBUFFER_CB_INTERNAL_FLAGS  0xffff0000

/* Flag set if the callback is using the cb_obsolete function pointer */
#define EVBUFFER_CB_OBSOLETE	       0x00040000
/* evbuffer_chain support */
#define CHAIN_SPACE_PTR(ch) ((ch)->buffer + (ch)->misalign + (ch)->off)
#define CHAIN_SPACE_LEN(ch) ((ch)->flags & EVBUFFER_IMMUTABLE ? \
	    0 : (ch)->buffer_len - ((ch)->misalign + (ch)->off))

#define CHAIN_PINNED(ch)  (((ch)->flags & EVBUFFER_MEM_PINNED_ANY) != 0)
#define CHAIN_PINNED_R(ch)  (((ch)->flags & EVBUFFER_MEM_PINNED_R) != 0)
static void evbuffer_chain_align(struct evbuffer_chain *chain);
static int evbuffer_chain_should_realign(struct evbuffer_chain *chain,
    size_t datalen);
static void evbuffer_deferred_callback(struct deferred_cb *cb, void *arg);
static int evbuffer_ptr_memcmp(const struct evbuffer *buf,
    const struct evbuffer_ptr *pos, const char *mem, size_t len);
static struct evbuffer_chain *evbuffer_expand_singlechain(struct evbuffer *buf,
    size_t datlen);

#ifdef WIN32
static int evbuffer_readfile(struct evbuffer *buf, evutil_socket_t fd,
    ev_ssize_t howmuch);
#else
#define evbuffer_readfile evbuffer_read
#endif
static struct evbuffer_chain *
evbuffer_chain_new(size_t size)
{
	struct evbuffer_chain *chain;
	size_t to_alloc;

	size += EVBUFFER_CHAIN_SIZE;

	/* get the next largest memory that can hold the buffer */
	to_alloc = MIN_BUFFER_SIZE;
	while (to_alloc < size)
		to_alloc <<= 1;

	/* we get everything in one chunk */
	if ((chain = mm_malloc(to_alloc)) == NULL)
		return (NULL);

	memset(chain, 0, EVBUFFER_CHAIN_SIZE);

	chain->buffer_len = to_alloc - EVBUFFER_CHAIN_SIZE;

	/* this way we can manipulate the buffer to different addresses,
	 * which is required for mmap for example.
	 */
	chain->buffer = EVBUFFER_CHAIN_EXTRA(u_char, chain);

	return (chain);
}
static void
evbuffer_chain_free(struct evbuffer_chain *chain)
{
	if (CHAIN_PINNED(chain)) {
		chain->flags |= EVBUFFER_DANGLING;
		return;
	}
	if (chain->flags & (EVBUFFER_MMAP|EVBUFFER_SENDFILE|
		EVBUFFER_REFERENCE)) {
		if (chain->flags & EVBUFFER_REFERENCE) {
			struct evbuffer_chain_reference *info =
			    EVBUFFER_CHAIN_EXTRA(
				    struct evbuffer_chain_reference,
				    chain);
			if (info->cleanupfn)
				(*info->cleanupfn)(chain->buffer,
				    chain->buffer_len,
				    info->extra);
		}
#ifdef _EVENT_HAVE_MMAP
		if (chain->flags & EVBUFFER_MMAP) {
			struct evbuffer_chain_fd *info =
			    EVBUFFER_CHAIN_EXTRA(struct evbuffer_chain_fd,
				chain);
			if (munmap(chain->buffer, chain->buffer_len) == -1)
				event_warn("%s: munmap failed", __func__);
			if (close(info->fd) == -1)
				event_warn("%s: close(%d) failed",
				    __func__, info->fd);
		}
#endif
		if (chain->flags & EVBUFFER_SENDFILE) {
			struct evbuffer_chain_fd *info =
			    EVBUFFER_CHAIN_EXTRA(struct evbuffer_chain_fd,
				chain);
			if (close(info->fd) == -1)
				event_warn("%s: close(%d) failed",
				    __func__, info->fd);
		}
	}

	mm_free(chain);
}
static void
evbuffer_free_all_chains(struct evbuffer_chain *chain)
{
	struct evbuffer_chain *next;
	for (; chain; chain = next) {
		next = chain->next;
		evbuffer_chain_free(chain);
	}
}

#ifndef NDEBUG
static int
evbuffer_chains_all_empty(struct evbuffer_chain *chain)
{
	for (; chain; chain = chain->next) {
		if (chain->off)
			return 0;
	}
	return 1;
}
#else
/* The definition is needed for EVUTIL_ASSERT, which uses sizeof to avoid
   "unused variable" warnings. */
static inline int evbuffer_chains_all_empty(struct evbuffer_chain *chain) {
	return 1;
}
#endif
/* Free all trailing chains in 'buf' that are neither pinned nor empty, prior
 * to replacing them all with a new chain.  Return a pointer to the place
 * where the new chain will go.
 *
 * Internal; requires lock.  The caller must fix up buf->last and buf->first
 * as needed; they might have been freed.
 */
static struct evbuffer_chain **
evbuffer_free_trailing_empty_chains(struct evbuffer *buf)
{
	struct evbuffer_chain **ch = buf->last_with_datap;
	/* Find the first victim chain.  It might be *last_with_datap */
	while ((*ch) && ((*ch)->off != 0 || CHAIN_PINNED(*ch)))
		ch = &(*ch)->next;
	if (*ch) {
		EVUTIL_ASSERT(evbuffer_chains_all_empty(*ch));
		evbuffer_free_all_chains(*ch);
		*ch = NULL;
	}
	return ch;
}
/* Add a single chain 'chain' to the end of 'buf', freeing trailing empty
 * chains as necessary.  Requires lock.  Does not schedule callbacks.
 */
static void
evbuffer_chain_insert(struct evbuffer *buf,
    struct evbuffer_chain *chain)
{
	ASSERT_EVBUFFER_LOCKED(buf);
	if (*buf->last_with_datap == NULL) {
		/* There are no chains data on the buffer at all. */
		EVUTIL_ASSERT(buf->last_with_datap == &buf->first);
		EVUTIL_ASSERT(buf->first == NULL);
		buf->first = buf->last = chain;
	} else {
		struct evbuffer_chain **ch = buf->last_with_datap;
		/* Find the first victim chain.  It might be *last_with_datap */
		while ((*ch) && ((*ch)->off != 0 || CHAIN_PINNED(*ch)))
			ch = &(*ch)->next;
		if (*ch == NULL) {
			/* There is no victim; just append this new chain. */
			buf->last->next = chain;
			if (chain->off)
				buf->last_with_datap = &buf->last->next;
		} else {
			/* Replace all victim chains with this chain. */
			EVUTIL_ASSERT(evbuffer_chains_all_empty(*ch));
			evbuffer_free_all_chains(*ch);
			*ch = chain;
		}
		buf->last = chain;
	}
	buf->total_len += chain->off;
}
static inline struct evbuffer_chain *
evbuffer_chain_insert_new(struct evbuffer *buf, size_t datlen)
{
	struct evbuffer_chain *chain;
	if ((chain = evbuffer_chain_new(datlen)) == NULL)
		return NULL;
	evbuffer_chain_insert(buf, chain);
	return chain;
}
void
_evbuffer_chain_pin(struct evbuffer_chain *chain, unsigned flag)
{
	EVUTIL_ASSERT((chain->flags & flag) == 0);
	chain->flags |= flag;
}

void
_evbuffer_chain_unpin(struct evbuffer_chain *chain, unsigned flag)
{
	EVUTIL_ASSERT((chain->flags & flag) != 0);
	chain->flags &= ~flag;
	if (chain->flags & EVBUFFER_DANGLING)
		evbuffer_chain_free(chain);
}
struct evbuffer *
evbuffer_new(void)
{
	struct evbuffer *buffer;

	buffer = mm_calloc(1, sizeof(struct evbuffer));
	if (buffer == NULL)
		return (NULL);

	TAILQ_INIT(&buffer->callbacks);
	buffer->refcnt = 1;
	buffer->last_with_datap = &buffer->first;

	return (buffer);
}
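
/*
 * Illustrative sketch (not part of the original file): typical lifecycle of
 * an evbuffer via the public API declared in event2/buffer.h.  The buffer
 * starts out empty, with last_with_datap pointing at `first` as set up
 * above.
 *
 *	struct evbuffer *buf = evbuffer_new();
 *	if (buf) {
 *		evbuffer_add(buf, "hello", 5);
 *		// ... use the buffer ...
 *		evbuffer_free(buf);
 *	}
 */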
int
evbuffer_set_flags(struct evbuffer *buf, ev_uint64_t flags)
{
	EVBUFFER_LOCK(buf);
	buf->flags |= (ev_uint32_t)flags;
	EVBUFFER_UNLOCK(buf);
	return 0;
}

int
evbuffer_clear_flags(struct evbuffer *buf, ev_uint64_t flags)
{
	EVBUFFER_LOCK(buf);
	buf->flags &= ~(ev_uint32_t)flags;
	EVBUFFER_UNLOCK(buf);
	return 0;
}
void
_evbuffer_incref(struct evbuffer *buf)
{
	EVBUFFER_LOCK(buf);
	++buf->refcnt;
	EVBUFFER_UNLOCK(buf);
}

void
_evbuffer_incref_and_lock(struct evbuffer *buf)
{
	EVBUFFER_LOCK(buf);
	++buf->refcnt;
}
int
evbuffer_defer_callbacks(struct evbuffer *buffer, struct event_base *base)
{
	EVBUFFER_LOCK(buffer);
	buffer->cb_queue = event_base_get_deferred_cb_queue(base);
	buffer->deferred_cbs = 1;
	event_deferred_cb_init(&buffer->deferred,
	    evbuffer_deferred_callback, buffer);
	EVBUFFER_UNLOCK(buffer);
	return 0;
}
int
evbuffer_enable_locking(struct evbuffer *buf, void *lock)
{
#ifdef _EVENT_DISABLE_THREAD_SUPPORT
	return -1;
#else
	if (buf->lock)
		return -1;

	if (!lock) {
		EVTHREAD_ALLOC_LOCK(lock, EVTHREAD_LOCKTYPE_RECURSIVE);
		if (!lock)
			return -1;
		buf->lock = lock;
		buf->own_lock = 1;
	} else {
		buf->lock = lock;
		buf->own_lock = 0;
	}

	return 0;
#endif
}
void
evbuffer_set_parent(struct evbuffer *buf, struct bufferevent *bev)
{
	EVBUFFER_LOCK(buf);
	buf->parent = bev;
	EVBUFFER_UNLOCK(buf);
}
static void
evbuffer_run_callbacks(struct evbuffer *buffer, int running_deferred)
{
	struct evbuffer_cb_entry *cbent, *next;
	struct evbuffer_cb_info info;
	size_t new_size;
	ev_uint32_t mask, masked_val;
	int clear = 1;

	if (running_deferred) {
		mask = EVBUFFER_CB_NODEFER|EVBUFFER_CB_ENABLED;
		masked_val = EVBUFFER_CB_ENABLED;
	} else if (buffer->deferred_cbs) {
		mask = EVBUFFER_CB_NODEFER|EVBUFFER_CB_ENABLED;
		masked_val = EVBUFFER_CB_NODEFER|EVBUFFER_CB_ENABLED;
		/* Don't zero-out n_add/n_del, since the deferred callbacks
		   will want to see them. */
		clear = 0;
	} else {
		mask = EVBUFFER_CB_ENABLED;
		masked_val = EVBUFFER_CB_ENABLED;
	}

	ASSERT_EVBUFFER_LOCKED(buffer);

	if (TAILQ_EMPTY(&buffer->callbacks)) {
		buffer->n_add_for_cb = buffer->n_del_for_cb = 0;
		return;
	}
	if (buffer->n_add_for_cb == 0 && buffer->n_del_for_cb == 0)
		return;

	new_size = buffer->total_len;
	info.orig_size = new_size + buffer->n_del_for_cb - buffer->n_add_for_cb;
	info.n_added = buffer->n_add_for_cb;
	info.n_deleted = buffer->n_del_for_cb;
	if (clear) {
		buffer->n_add_for_cb = 0;
		buffer->n_del_for_cb = 0;
	}
	for (cbent = TAILQ_FIRST(&buffer->callbacks);
	     cbent != TAILQ_END(&buffer->callbacks);
	     cbent = next) {
		/* Get the 'next' pointer now in case this callback decides
		 * to remove itself or something. */
		next = TAILQ_NEXT(cbent, next);

		if ((cbent->flags & mask) != masked_val)
			continue;

		if ((cbent->flags & EVBUFFER_CB_OBSOLETE))
			cbent->cb.cb_obsolete(buffer,
			    info.orig_size, new_size, cbent->cbarg);
		else
			cbent->cb.cb_func(buffer, &info, cbent->cbarg);
	}
}
void
evbuffer_invoke_callbacks(struct evbuffer *buffer)
{
	if (TAILQ_EMPTY(&buffer->callbacks)) {
		buffer->n_add_for_cb = buffer->n_del_for_cb = 0;
		return;
	}

	if (buffer->deferred_cbs) {
		if (buffer->deferred.queued)
			return;
		_evbuffer_incref_and_lock(buffer);
		if (buffer->parent)
			bufferevent_incref(buffer->parent);
		EVBUFFER_UNLOCK(buffer);
		event_deferred_cb_schedule(buffer->cb_queue, &buffer->deferred);
	}

	evbuffer_run_callbacks(buffer, 0);
}
static void
evbuffer_deferred_callback(struct deferred_cb *cb, void *arg)
{
	struct bufferevent *parent = NULL;
	struct evbuffer *buffer = arg;

	/* XXXX It would be better to run these callbacks without holding the
	 * lock */
	EVBUFFER_LOCK(buffer);
	parent = buffer->parent;
	evbuffer_run_callbacks(buffer, 1);
	_evbuffer_decref_and_unlock(buffer);
	if (parent)
		bufferevent_decref(parent);
}
static void
evbuffer_remove_all_callbacks(struct evbuffer *buffer)
{
	struct evbuffer_cb_entry *cbent;

	while ((cbent = TAILQ_FIRST(&buffer->callbacks))) {
		TAILQ_REMOVE(&buffer->callbacks, cbent, next);
		mm_free(cbent);
	}
}
void
_evbuffer_decref_and_unlock(struct evbuffer *buffer)
{
	struct evbuffer_chain *chain, *next;
	ASSERT_EVBUFFER_LOCKED(buffer);

	EVUTIL_ASSERT(buffer->refcnt > 0);

	if (--buffer->refcnt > 0) {
		EVBUFFER_UNLOCK(buffer);
		return;
	}

	for (chain = buffer->first; chain != NULL; chain = next) {
		next = chain->next;
		evbuffer_chain_free(chain);
	}
	evbuffer_remove_all_callbacks(buffer);
	if (buffer->deferred_cbs)
		event_deferred_cb_cancel(buffer->cb_queue, &buffer->deferred);

	EVBUFFER_UNLOCK(buffer);
	if (buffer->own_lock)
		EVTHREAD_FREE_LOCK(buffer->lock, EVTHREAD_LOCKTYPE_RECURSIVE);
	mm_free(buffer);
}
void
evbuffer_free(struct evbuffer *buffer)
{
	EVBUFFER_LOCK(buffer);
	_evbuffer_decref_and_unlock(buffer);
}

void
evbuffer_lock(struct evbuffer *buf)
{
	EVBUFFER_LOCK(buf);
}

void
evbuffer_unlock(struct evbuffer *buf)
{
	EVBUFFER_UNLOCK(buf);
}

size_t
evbuffer_get_length(const struct evbuffer *buffer)
{
	size_t result;

	EVBUFFER_LOCK(buffer);

	result = (buffer->total_len);

	EVBUFFER_UNLOCK(buffer);

	return result;
}

size_t
evbuffer_get_contiguous_space(const struct evbuffer *buf)
{
	struct evbuffer_chain *chain;
	size_t result;

	EVBUFFER_LOCK(buf);
	chain = buf->first;
	result = (chain != NULL ? chain->off : 0);
	EVBUFFER_UNLOCK(buf);

	return result;
}
int
evbuffer_reserve_space(struct evbuffer *buf, ev_ssize_t size,
    struct evbuffer_iovec *vec, int n_vecs)
{
	struct evbuffer_chain *chain, **chainp;
	int n = -1;

	EVBUFFER_LOCK(buf);
	if (buf->freeze_end)
		goto done;
	if (n_vecs < 1)
		goto done;
	if (n_vecs == 1) {
		if ((chain = evbuffer_expand_singlechain(buf, size)) == NULL)
			goto done;

		vec[0].iov_base = CHAIN_SPACE_PTR(chain);
		vec[0].iov_len = (size_t) CHAIN_SPACE_LEN(chain);
		EVUTIL_ASSERT(size<0 || (size_t)vec[0].iov_len >= (size_t)size);
		n = 1;
	} else {
		if (_evbuffer_expand_fast(buf, size, n_vecs)<0)
			goto done;
		n = _evbuffer_read_setup_vecs(buf, size, vec, n_vecs,
				&chainp, 0);
	}

done:
	EVBUFFER_UNLOCK(buf);
	return n;
}
static int
advance_last_with_data(struct evbuffer *buf)
{
	int n = 0;
	ASSERT_EVBUFFER_LOCKED(buf);

	if (!*buf->last_with_datap)
		return 0;

	while ((*buf->last_with_datap)->next && (*buf->last_with_datap)->next->off) {
		buf->last_with_datap = &(*buf->last_with_datap)->next;
		++n;
	}
	return n;
}
int
evbuffer_commit_space(struct evbuffer *buf,
    struct evbuffer_iovec *vec, int n_vecs)
{
	struct evbuffer_chain *chain, **firstchainp, **chainp;
	int result = -1;
	size_t added = 0;
	int i;

	EVBUFFER_LOCK(buf);

	if (buf->freeze_end)
		goto done;
	if (n_vecs == 0) {
		result = 0;
		goto done;
	} else if (n_vecs == 1 &&
	    (buf->last && vec[0].iov_base == (void*)CHAIN_SPACE_PTR(buf->last))) {
		/* The user only got or used one chain; it might not
		 * be the first one with space in it. */
		if ((size_t)vec[0].iov_len > (size_t)CHAIN_SPACE_LEN(buf->last))
			goto done;
		buf->last->off += vec[0].iov_len;
		added = vec[0].iov_len;
		if (added > 0)
			advance_last_with_data(buf);
		goto okay;
	}

	/* Advance 'firstchain' to the first chain with space in it. */
	firstchainp = buf->last_with_datap;
	if (!*firstchainp)
		goto done;
	if (CHAIN_SPACE_LEN(*firstchainp) == 0) {
		firstchainp = &(*firstchainp)->next;
	}

	chain = *firstchainp;
	/* pass 1: make sure that the pointers and lengths of vecs[] are in
	 * bounds before we try to commit anything. */
	for (i=0; i<n_vecs; ++i) {
		if (!chain)
			goto done;
		if (vec[i].iov_base != (void*)CHAIN_SPACE_PTR(chain) ||
		    (size_t)vec[i].iov_len > CHAIN_SPACE_LEN(chain))
			goto done;
		chain = chain->next;
	}
	/* pass 2: actually adjust all the chains. */
	chainp = firstchainp;
	for (i=0; i<n_vecs; ++i) {
		(*chainp)->off += vec[i].iov_len;
		added += vec[i].iov_len;
		if (vec[i].iov_len) {
			buf->last_with_datap = chainp;
		}
		chainp = &(*chainp)->next;
	}

okay:
	buf->total_len += added;
	buf->n_add_for_cb += added;
	result = 0;
	evbuffer_invoke_callbacks(buf);

done:
	EVBUFFER_UNLOCK(buf);
	return result;
}
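
/*
 * Illustrative sketch (not part of the original file): the reserve/commit
 * protocol implemented by evbuffer_reserve_space() and
 * evbuffer_commit_space() above.  The caller reserves writable space, fills
 * some prefix of each vector, shrinks iov_len to what was actually written,
 * and commits.  `fill()` is a hypothetical producer.
 *
 *	struct evbuffer_iovec v[2];
 *	int i, n = evbuffer_reserve_space(buf, 4096, v, 2);
 *	for (i = 0; i < n; ++i)
 *		v[i].iov_len = fill(v[i].iov_base, v[i].iov_len);
 *	evbuffer_commit_space(buf, v, n);
 */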
static inline int
HAS_PINNED_R(struct evbuffer *buf)
{
	return (buf->last && CHAIN_PINNED_R(buf->last));
}

static inline void
ZERO_CHAIN(struct evbuffer *dst)
{
	ASSERT_EVBUFFER_LOCKED(dst);
	dst->first = NULL;
	dst->last = NULL;
	dst->last_with_datap = &(dst)->first;
	dst->total_len = 0;
}
/* Prepares the contents of src to be moved to another buffer by removing
 * read-pinned chains. The first pinned chain is saved in first, and the
 * last in last. If src has no read-pinned chains, first and last are set
 * to NULL. */
static int
PRESERVE_PINNED(struct evbuffer *src, struct evbuffer_chain **first,
    struct evbuffer_chain **last)
{
	struct evbuffer_chain *chain, **pinned;

	ASSERT_EVBUFFER_LOCKED(src);

	if (!HAS_PINNED_R(src)) {
		*first = *last = NULL;
		return 0;
	}

	pinned = src->last_with_datap;
	if (!CHAIN_PINNED_R(*pinned))
		pinned = &(*pinned)->next;
	EVUTIL_ASSERT(CHAIN_PINNED_R(*pinned));
	chain = *first = *pinned;
	*last = src->last;

	/* If there's data in the first pinned chain, we need to allocate
	 * a new chain and copy the data over. */
	if (chain->off) {
		struct evbuffer_chain *tmp;

		EVUTIL_ASSERT(pinned == src->last_with_datap);
		tmp = evbuffer_chain_new(chain->off);
		if (!tmp)
			return -1;
		memcpy(tmp->buffer, chain->buffer + chain->misalign,
			chain->off);
		tmp->off = chain->off;
		*src->last_with_datap = tmp;
		src->last = tmp;
		chain->misalign += chain->off;
		chain->off = 0;
	} else {
		src->last = *src->last_with_datap;
		*pinned = NULL;
	}

	return 0;
}
static void
RESTORE_PINNED(struct evbuffer *src, struct evbuffer_chain *pinned,
    struct evbuffer_chain *last)
{
	ASSERT_EVBUFFER_LOCKED(src);

	if (!pinned) {
		ZERO_CHAIN(src);
		return;
	}

	src->first = pinned;
	src->last = last;
	src->last_with_datap = &src->first;
	src->total_len = 0;
}
static inline void
COPY_CHAIN(struct evbuffer *dst, struct evbuffer *src)
{
	ASSERT_EVBUFFER_LOCKED(dst);
	ASSERT_EVBUFFER_LOCKED(src);
	dst->first = src->first;
	if (src->last_with_datap == &src->first)
		dst->last_with_datap = &dst->first;
	else
		dst->last_with_datap = src->last_with_datap;
	dst->last = src->last;
	dst->total_len = src->total_len;
}
static inline void
APPEND_CHAIN(struct evbuffer *dst, struct evbuffer *src)
{
	ASSERT_EVBUFFER_LOCKED(dst);
	ASSERT_EVBUFFER_LOCKED(src);
	dst->last->next = src->first;
	if (src->last_with_datap == &src->first)
		dst->last_with_datap = &dst->last->next;
	else
		dst->last_with_datap = src->last_with_datap;
	dst->last = src->last;
	dst->total_len += src->total_len;
}
static inline void
PREPEND_CHAIN(struct evbuffer *dst, struct evbuffer *src)
{
	ASSERT_EVBUFFER_LOCKED(dst);
	ASSERT_EVBUFFER_LOCKED(src);
	src->last->next = dst->first;
	dst->first = src->first;
	dst->total_len += src->total_len;
	if (*dst->last_with_datap == NULL) {
		if (src->last_with_datap == &(src)->first)
			dst->last_with_datap = &dst->first;
		else
			dst->last_with_datap = src->last_with_datap;
	} else if (dst->last_with_datap == &dst->first) {
		dst->last_with_datap = &src->last->next;
	}
}
int
evbuffer_add_buffer(struct evbuffer *outbuf, struct evbuffer *inbuf)
{
	struct evbuffer_chain *pinned, *last;
	size_t in_total_len, out_total_len;
	int result = 0;

	EVBUFFER_LOCK2(inbuf, outbuf);
	in_total_len = inbuf->total_len;
	out_total_len = outbuf->total_len;

	if (in_total_len == 0 || outbuf == inbuf)
		goto done;

	if (outbuf->freeze_end || inbuf->freeze_start) {
		result = -1;
		goto done;
	}

	if (PRESERVE_PINNED(inbuf, &pinned, &last) < 0) {
		result = -1;
		goto done;
	}

	if (out_total_len == 0) {
		/* There might be an empty chain at the start of outbuf; free
		 * it. */
		evbuffer_free_all_chains(outbuf->first);
		COPY_CHAIN(outbuf, inbuf);
	} else {
		APPEND_CHAIN(outbuf, inbuf);
	}

	RESTORE_PINNED(inbuf, pinned, last);

	inbuf->n_del_for_cb += in_total_len;
	outbuf->n_add_for_cb += in_total_len;

	evbuffer_invoke_callbacks(inbuf);
	evbuffer_invoke_callbacks(outbuf);

done:
	EVBUFFER_UNLOCK2(inbuf, outbuf);
	return result;
}
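
/*
 * Illustrative sketch (not part of the original file): evbuffer_add_buffer()
 * moves (not copies) all data from `src` to the end of `dst`, so it is the
 * cheap way to concatenate two buffers; `src` is left empty afterwards.
 *
 *	evbuffer_add(src, "abc", 3);
 *	evbuffer_add_buffer(dst, src);
 *	// evbuffer_get_length(src) == 0; dst now ends with "abc"
 */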
int
evbuffer_prepend_buffer(struct evbuffer *outbuf, struct evbuffer *inbuf)
{
	struct evbuffer_chain *pinned, *last;
	size_t in_total_len, out_total_len;
	int result = 0;

	EVBUFFER_LOCK2(inbuf, outbuf);

	in_total_len = inbuf->total_len;
	out_total_len = outbuf->total_len;

	if (!in_total_len || inbuf == outbuf)
		goto done;

	if (outbuf->freeze_start || inbuf->freeze_start) {
		result = -1;
		goto done;
	}

	if (PRESERVE_PINNED(inbuf, &pinned, &last) < 0) {
		result = -1;
		goto done;
	}

	if (out_total_len == 0) {
		/* There might be an empty chain at the start of outbuf; free
		 * it. */
		evbuffer_free_all_chains(outbuf->first);
		COPY_CHAIN(outbuf, inbuf);
	} else {
		PREPEND_CHAIN(outbuf, inbuf);
	}

	RESTORE_PINNED(inbuf, pinned, last);

	inbuf->n_del_for_cb += in_total_len;
	outbuf->n_add_for_cb += in_total_len;

	evbuffer_invoke_callbacks(inbuf);
	evbuffer_invoke_callbacks(outbuf);
done:
	EVBUFFER_UNLOCK2(inbuf, outbuf);
	return result;
}
int
evbuffer_drain(struct evbuffer *buf, size_t len)
{
	struct evbuffer_chain *chain, *next;
	size_t remaining, old_len;
	int result = 0;

	EVBUFFER_LOCK(buf);
	old_len = buf->total_len;

	if (old_len == 0)
		goto done;

	if (buf->freeze_start) {
		result = -1;
		goto done;
	}

	if (len >= old_len && !HAS_PINNED_R(buf)) {
		len = old_len;
		for (chain = buf->first; chain != NULL; chain = next) {
			next = chain->next;
			evbuffer_chain_free(chain);
		}

		ZERO_CHAIN(buf);
	} else {
		if (len >= old_len)
			len = old_len;

		buf->total_len -= len;
		remaining = len;
		for (chain = buf->first;
		     remaining >= chain->off;
		     chain = next) {
			next = chain->next;
			remaining -= chain->off;

			if (chain == *buf->last_with_datap) {
				buf->last_with_datap = &buf->first;
			}
			if (&chain->next == buf->last_with_datap)
				buf->last_with_datap = &buf->first;

			if (CHAIN_PINNED_R(chain)) {
				EVUTIL_ASSERT(remaining == 0);
				chain->misalign += chain->off;
				chain->off = 0;
				break;
			} else
				evbuffer_chain_free(chain);
		}

		buf->first = chain;
		if (chain) {
			chain->misalign += remaining;
			chain->off -= remaining;
		}
	}

	buf->n_del_for_cb += len;
	/* Tell someone about changes in this buffer */
	evbuffer_invoke_callbacks(buf);

done:
	EVBUFFER_UNLOCK(buf);
	return result;
}
/* Reads data from an event buffer and drains the bytes read */
int
evbuffer_remove(struct evbuffer *buf, void *data_out, size_t datlen)
{
	ev_ssize_t n;
	EVBUFFER_LOCK(buf);
	n = evbuffer_copyout(buf, data_out, datlen);
	if (n > 0) {
		if (evbuffer_drain(buf, n)<0)
			n = -1;
	}
	EVBUFFER_UNLOCK(buf);
	return (int)n;
}
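
/*
 * Illustrative sketch (not part of the original file): evbuffer_copyout()
 * reads without draining, while evbuffer_remove() drains what it copies.
 *
 *	char tmp[128];
 *	ev_ssize_t peeked = evbuffer_copyout(buf, tmp, sizeof(tmp)); // peek
 *	int consumed = evbuffer_remove(buf, tmp, sizeof(tmp));       // consume
 */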
ev_ssize_t
evbuffer_copyout(struct evbuffer *buf, void *data_out, size_t datlen)
{
	/*XXX fails badly on sendfile case. */
	struct evbuffer_chain *chain;
	char *data = data_out;
	size_t nread;
	ev_ssize_t result = 0;

	EVBUFFER_LOCK(buf);

	chain = buf->first;

	if (datlen >= buf->total_len)
		datlen = buf->total_len;

	if (datlen == 0)
		goto done;

	if (buf->freeze_start) {
		result = -1;
		goto done;
	}

	nread = datlen;

	while (datlen && datlen >= chain->off) {
		memcpy(data, chain->buffer + chain->misalign, chain->off);
		data += chain->off;
		datlen -= chain->off;

		chain = chain->next;
		EVUTIL_ASSERT(chain || datlen==0);
	}

	if (datlen) {
		EVUTIL_ASSERT(chain);
		memcpy(data, chain->buffer + chain->misalign, datlen);
	}

	result = nread;
done:
	EVBUFFER_UNLOCK(buf);
	return result;
}
/* reads data from the src buffer to the dst buffer, avoids memcpy as
 * possible. */
/*  XXXX should return ev_ssize_t */
int
evbuffer_remove_buffer(struct evbuffer *src, struct evbuffer *dst,
    size_t datlen)
{
	/*XXX We should have an option to force this to be zero-copy.*/

	/*XXX can fail badly on sendfile case. */
	struct evbuffer_chain *chain, *previous;
	size_t nread = 0;
	int result;

	EVBUFFER_LOCK2(src, dst);

	chain = previous = src->first;

	if (datlen == 0 || dst == src) {
		result = 0;
		goto done;
	}

	if (dst->freeze_end || src->freeze_start) {
		result = -1;
		goto done;
	}

	/* short-cut if there is no more data buffered */
	if (datlen >= src->total_len) {
		datlen = src->total_len;
		evbuffer_add_buffer(dst, src);
		result = (int)datlen; /*XXXX should return ev_ssize_t*/
		goto done;
	}

	/* removes chains if possible */
	while (chain->off <= datlen) {
		/* We can't remove the last with data from src unless we
		 * remove all chains, in which case we would have done the if
		 * block above */
		EVUTIL_ASSERT(chain != *src->last_with_datap);
		nread += chain->off;
		datlen -= chain->off;
		previous = chain;
		if (src->last_with_datap == &chain->next)
			src->last_with_datap = &src->first;
		chain = chain->next;
	}

	if (nread) {
		/* we can remove the chain */
		struct evbuffer_chain **chp;
		chp = evbuffer_free_trailing_empty_chains(dst);

		if (dst->first == NULL) {
			dst->first = src->first;
		} else {
			*chp = src->first;
		}
		dst->last = previous;
		previous->next = NULL;
		src->first = chain;
		advance_last_with_data(dst);

		dst->total_len += nread;
		dst->n_add_for_cb += nread;
	}

	/* we know that there is more data in the src buffer than
	 * we want to read, so we manually drain the chain */
	evbuffer_add(dst, chain->buffer + chain->misalign, datlen);
	chain->misalign += datlen;
	chain->off -= datlen;
	nread += datlen;

	/* You might think we would want to increment dst->n_add_for_cb
	 * here too.  But evbuffer_add above already took care of that.
	 */
	src->total_len -= nread;
	src->n_del_for_cb += nread;

	if (nread) {
		evbuffer_invoke_callbacks(dst);
		evbuffer_invoke_callbacks(src);
	}
	result = (int)nread;/*XXXX should change return type */

done:
	EVBUFFER_UNLOCK2(src, dst);
	return result;
}
unsigned char *
evbuffer_pullup(struct evbuffer *buf, ev_ssize_t size)
{
	struct evbuffer_chain *chain, *next, *tmp, *last_with_data;
	unsigned char *buffer, *result = NULL;
	ev_ssize_t remaining;
	int removed_last_with_data = 0;
	int removed_last_with_datap = 0;

	EVBUFFER_LOCK(buf);

	chain = buf->first;

	if (size < 0)
		size = buf->total_len;
	/* if size > buf->total_len, we cannot guarantee to the user that she
	 * is going to have a long enough buffer afterwards; so we return
	 * NULL */
	if (size == 0 || (size_t)size > buf->total_len)
		goto done;

	/* No need to pull up anything; the first size bytes are
	 * already here. */
	if (chain->off >= (size_t)size) {
		result = chain->buffer + chain->misalign;
		goto done;
	}

	/* Make sure that none of the chains we need to copy from is pinned. */
	remaining = size - chain->off;
	EVUTIL_ASSERT(remaining >= 0);
	for (tmp=chain->next; tmp; tmp=tmp->next) {
		if (CHAIN_PINNED(tmp))
			goto done;
		if (tmp->off >= (size_t)remaining)
			break;
		remaining -= tmp->off;
	}

	if (CHAIN_PINNED(chain)) {
		size_t old_off = chain->off;
		if (CHAIN_SPACE_LEN(chain) < size - chain->off) {
			/* not enough room at end of chunk. */
			goto done;
		}
		buffer = CHAIN_SPACE_PTR(chain);
		tmp = chain;
		tmp->off = size;
		size -= old_off;
		chain = chain->next;
	} else if (chain->buffer_len - chain->misalign >= (size_t)size) {
		/* already have enough space in the first chain */
		size_t old_off = chain->off;
		buffer = chain->buffer + chain->misalign + chain->off;
		tmp = chain;
		tmp->off = size;
		size -= old_off;
		chain = chain->next;
	} else {
		if ((tmp = evbuffer_chain_new(size)) == NULL) {
			event_warn("%s: out of memory", __func__);
			goto done;
		}
		buffer = tmp->buffer;
		tmp->off = size;
		buf->first = tmp;
	}

	/* TODO(niels): deal with buffers that point to NULL like sendfile */

	/* Copy and free every chunk that will be entirely pulled into tmp */
	last_with_data = *buf->last_with_datap;
	for (; chain != NULL && (size_t)size >= chain->off; chain = next) {
		next = chain->next;

		memcpy(buffer, chain->buffer + chain->misalign, chain->off);
		size -= chain->off;
		buffer += chain->off;
		if (chain == last_with_data)
			removed_last_with_data = 1;
		if (&chain->next == buf->last_with_datap)
			removed_last_with_datap = 1;

		evbuffer_chain_free(chain);
	}

	if (chain != NULL) {
		memcpy(buffer, chain->buffer + chain->misalign, size);
		chain->misalign += size;
		chain->off -= size;
	} else {
		buf->last = tmp;
	}

	tmp->next = chain;

	if (removed_last_with_data) {
		buf->last_with_datap = &buf->first;
	} else if (removed_last_with_datap) {
		if (buf->first->next && buf->first->next->off)
			buf->last_with_datap = &buf->first->next;
		else
			buf->last_with_datap = &buf->first;
	}

	result = (tmp->buffer + tmp->misalign);

done:
	EVBUFFER_UNLOCK(buf);
	return result;
}
/*
 * Reads a line terminated by either '\r\n', '\n\r' or '\r' or '\n'.
 * The returned buffer needs to be freed by the caller.
 */
char *
evbuffer_readline(struct evbuffer *buffer)
{
	return evbuffer_readln(buffer, NULL, EVBUFFER_EOL_ANY);
}
static inline ev_ssize_t
evbuffer_strchr(struct evbuffer_ptr *it, const char chr)
{
	struct evbuffer_chain *chain = it->_internal.chain;
	size_t i = it->_internal.pos_in_chain;
	while (chain != NULL) {
		char *buffer = (char *)chain->buffer + chain->misalign;
		char *cp = memchr(buffer+i, chr, chain->off-i);
		if (cp) {
			it->_internal.chain = chain;
			it->_internal.pos_in_chain = cp - buffer;
			it->pos += (cp - buffer - i);
			return it->pos;
		}
		it->pos += chain->off - i;
		i = 0;
		chain = chain->next;
	}

	return (-1);
}
static inline char *
find_eol_char(char *s, size_t len)
{
#define CHUNK_SZ 128
	/* Lots of benchmarking found this approach to be faster in practice
	 * than doing two memchrs over the whole buffer, doing a memchr on each
	 * char of the buffer, or trying to emulate memchr by hand. */
	char *s_end, *cr, *lf;
	s_end = s+len;
	while (s < s_end) {
		size_t chunk = (s + CHUNK_SZ < s_end) ? CHUNK_SZ : (s_end - s);
		cr = memchr(s, '\r', chunk);
		lf = memchr(s, '\n', chunk);
		if (cr) {
			if (lf && lf < cr)
				return lf;
			return cr;
		} else if (lf) {
			return lf;
		}
		s += CHUNK_SZ;
	}

	return NULL;
#undef CHUNK_SZ
}
static ev_ssize_t
evbuffer_find_eol_char(struct evbuffer_ptr *it)
{
	struct evbuffer_chain *chain = it->_internal.chain;
	size_t i = it->_internal.pos_in_chain;
	while (chain != NULL) {
		char *buffer = (char *)chain->buffer + chain->misalign;
		char *cp = find_eol_char(buffer+i, chain->off-i);
		if (cp) {
			it->_internal.chain = chain;
			it->_internal.pos_in_chain = cp - buffer;
			it->pos += (cp - buffer) - i;
			return it->pos;
		}
		it->pos += chain->off - i;
		i = 0;
		chain = chain->next;
	}

	return (-1);
}
*ptr
, const char *chrset
)
1371 struct evbuffer_chain
*chain
= ptr
->_internal
.chain
;
1372 size_t i
= ptr
->_internal
.pos_in_chain
;
1378 char *buffer
= (char *)chain
->buffer
+ chain
->misalign
;
1379 for (; i
< chain
->off
; ++i
) {
1380 const char *p
= chrset
;
1382 if (buffer
[i
] == *p
++)
1385 ptr
->_internal
.chain
= chain
;
1386 ptr
->_internal
.pos_in_chain
= i
;
1394 if (! chain
->next
) {
1395 ptr
->_internal
.chain
= chain
;
1396 ptr
->_internal
.pos_in_chain
= i
;
1401 chain
= chain
->next
;
static inline char
evbuffer_getchr(struct evbuffer_ptr *it)
{
	struct evbuffer_chain *chain = it->_internal.chain;
	size_t off = it->_internal.pos_in_chain;

	return chain->buffer[chain->misalign + off];
}
struct evbuffer_ptr
evbuffer_search_eol(struct evbuffer *buffer,
    struct evbuffer_ptr *start, size_t *eol_len_out,
    enum evbuffer_eol_style eol_style)
{
	struct evbuffer_ptr it, it2;
	size_t extra_drain = 0;
	int ok = 0;

	EVBUFFER_LOCK(buffer);

	if (start) {
		memcpy(&it, start, sizeof(it));
	} else {
		it.pos = 0;
		it._internal.chain = buffer->first;
		it._internal.pos_in_chain = 0;
	}

	/* the eol_style determines our first stop character and how many
	 * characters we are going to drain afterwards. */
	switch (eol_style) {
	case EVBUFFER_EOL_ANY:
		if (evbuffer_find_eol_char(&it) < 0)
			goto done;
		memcpy(&it2, &it, sizeof(it));
		extra_drain = evbuffer_strspn(&it2, "\r\n");
		break;
	case EVBUFFER_EOL_CRLF_STRICT: {
		it = evbuffer_search(buffer, "\r\n", 2, &it);
		if (it.pos < 0)
			goto done;
		extra_drain = 2;
		break;
	}
	case EVBUFFER_EOL_CRLF:
		while (1) {
			if (evbuffer_find_eol_char(&it) < 0)
				goto done;
			if (evbuffer_getchr(&it) == '\n') {
				extra_drain = 1;
				break;
			} else if (!evbuffer_ptr_memcmp(
				    buffer, &it, "\r\n", 2)) {
				extra_drain = 2;
				break;
			} else {
				if (evbuffer_ptr_set(buffer, &it, 1,
					EVBUFFER_PTR_ADD)<0)
					goto done;
			}
		}
		break;
	case EVBUFFER_EOL_LF:
		if (evbuffer_strchr(&it, '\n') < 0)
			goto done;
		extra_drain = 1;
		break;
	default:
		goto done;
	}

	ok = 1;
done:
	EVBUFFER_UNLOCK(buffer);

	if (!ok) {
		it.pos = -1;
	}
	if (eol_len_out)
		*eol_len_out = extra_drain;

	return it;
}
char *
evbuffer_readln(struct evbuffer *buffer, size_t *n_read_out,
    enum evbuffer_eol_style eol_style)
{
	struct evbuffer_ptr it;
	char *line;
	size_t n_to_copy=0, extra_drain=0;
	char *result = NULL;

	EVBUFFER_LOCK(buffer);

	if (buffer->freeze_start) {
		goto done;
	}

	it = evbuffer_search_eol(buffer, NULL, &extra_drain, eol_style);
	if (it.pos < 0)
		goto done;
	n_to_copy = it.pos;

	if ((line = mm_malloc(n_to_copy+1)) == NULL) {
		event_warn("%s: out of memory", __func__);
		goto done;
	}

	evbuffer_remove(buffer, line, n_to_copy);
	line[n_to_copy] = '\0';

	evbuffer_drain(buffer, extra_drain);
	result = line;
done:
	EVBUFFER_UNLOCK(buffer);

	if (n_read_out)
		*n_read_out = result ? n_to_copy : 0;

	return result;
}
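
/*
 * Illustrative sketch (not part of the original file): draining a buffer
 * line by line.  Each returned line is NUL-terminated, has its EOL
 * stripped, and must be freed by the caller.  `handle_line()` is a
 * hypothetical consumer.
 *
 *	size_t len;
 *	char *line;
 *	while ((line = evbuffer_readln(buf, &len, EVBUFFER_EOL_CRLF))) {
 *		handle_line(line, len);
 *		free(line);
 *	}
 */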
#define EVBUFFER_CHAIN_MAX_AUTO_SIZE 4096

/* Adds data to an event buffer */

int
evbuffer_add(struct evbuffer *buf, const void *data_in, size_t datlen)
{
	struct evbuffer_chain *chain, *tmp;
	const unsigned char *data = data_in;
	size_t remain, to_alloc;
	int result = -1;

	EVBUFFER_LOCK(buf);

	if (buf->freeze_end) {
		goto done;
	}

	chain = buf->last;

	/* If there are no chains allocated for this buffer, allocate one
	 * big enough to hold all the data. */
	if (chain == NULL) {
		chain = evbuffer_chain_new(datlen);
		if (!chain)
			goto done;
		evbuffer_chain_insert(buf, chain);
	}

	if ((chain->flags & EVBUFFER_IMMUTABLE) == 0) {
		remain = (size_t)(chain->buffer_len - chain->misalign - chain->off);
		if (remain >= datlen) {
			/* there's enough space to hold all the data in the
			 * current last chain */
			memcpy(chain->buffer + chain->misalign + chain->off,
			    data, datlen);
			chain->off += datlen;
			buf->total_len += datlen;
			buf->n_add_for_cb += datlen;
			goto out;
		} else if (!CHAIN_PINNED(chain) &&
		    evbuffer_chain_should_realign(chain, datlen)) {
			/* we can fit the data into the misalignment */
			evbuffer_chain_align(chain);

			memcpy(chain->buffer + chain->off, data, datlen);
			chain->off += datlen;
			buf->total_len += datlen;
			buf->n_add_for_cb += datlen;
			goto out;
		}
	} else {
		/* we cannot write any data to the last chain */
		remain = 0;
	}

	/* we need to add another chain */
	to_alloc = chain->buffer_len;
	if (to_alloc <= EVBUFFER_CHAIN_MAX_AUTO_SIZE/2)
		to_alloc <<= 1;
	if (datlen > to_alloc)
		to_alloc = datlen;
	tmp = evbuffer_chain_new(to_alloc);
	if (tmp == NULL)
		goto done;

	if (remain) {
		memcpy(chain->buffer + chain->misalign + chain->off,
		    data, remain);
		chain->off += remain;
		buf->total_len += remain;
		buf->n_add_for_cb += remain;
	}

	data += remain;
	datlen -= remain;

	memcpy(tmp->buffer, data, datlen);
	tmp->off = datlen;
	evbuffer_chain_insert(buf, tmp);
	buf->n_add_for_cb += datlen;

out:
	evbuffer_invoke_callbacks(buf);
	result = 0;
done:
	EVBUFFER_UNLOCK(buf);
	return result;
}
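
/*
 * Illustrative sketch (not part of the original file): successive
 * evbuffer_add() calls append at the tail, filling the last chain before
 * allocating a new one, so small writes coalesce:
 *
 *	evbuffer_add(buf, "GET / HTTP/1.0", 14);
 *	evbuffer_add(buf, "\r\n\r\n", 4);
 */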
int
evbuffer_prepend(struct evbuffer *buf, const void *data, size_t datlen)
{
	struct evbuffer_chain *chain, *tmp;
	int result = -1;

	EVBUFFER_LOCK(buf);

	if (buf->freeze_start) {
		goto done;
	}

	chain = buf->first;

	if (chain == NULL) {
		chain = evbuffer_chain_new(datlen);
		if (!chain)
			goto done;
		evbuffer_chain_insert(buf, chain);
	}

	/* we cannot touch immutable buffers */
	if ((chain->flags & EVBUFFER_IMMUTABLE) == 0) {
		/* If this chain is empty, we can treat it as
		 * 'empty at the beginning' rather than 'empty at the end' */
		if (chain->off == 0)
			chain->misalign = chain->buffer_len;

		if ((size_t)chain->misalign >= datlen) {
			/* we have enough space to fit everything */
			memcpy(chain->buffer + chain->misalign - datlen,
			    data, datlen);
			chain->off += datlen;
			chain->misalign -= datlen;
			buf->total_len += datlen;
			buf->n_add_for_cb += datlen;
			goto out;
		} else if (chain->misalign) {
			/* we can only fit some of the data. */
			memcpy(chain->buffer,
			    (char*)data + datlen - chain->misalign,
			    (size_t)chain->misalign);
			chain->off += (size_t)chain->misalign;
			buf->total_len += (size_t)chain->misalign;
			buf->n_add_for_cb += (size_t)chain->misalign;
			datlen -= (size_t)chain->misalign;
			chain->misalign = 0;
		}
	}

	/* we need to add another chain */
	if ((tmp = evbuffer_chain_new(datlen)) == NULL)
		goto done;
	buf->first = tmp;
	if (buf->last_with_datap == &buf->first)
		buf->last_with_datap = &tmp->next;

	tmp->next = chain;

	tmp->off = datlen;
	tmp->misalign = tmp->buffer_len - datlen;

	memcpy(tmp->buffer + tmp->misalign, data, datlen);
	buf->total_len += datlen;
	/* (originally this added chain->misalign, which is always zero at
	 * this point; datlen is the number of bytes actually prepended) */
	buf->n_add_for_cb += datlen;

out:
	evbuffer_invoke_callbacks(buf);
	result = 0;
done:
	EVBUFFER_UNLOCK(buf);
	return result;
}
/** Helper: realigns the memory in chain->buffer so that misalign is 0. */
static void
evbuffer_chain_align(struct evbuffer_chain *chain)
{
	EVUTIL_ASSERT(!(chain->flags & EVBUFFER_IMMUTABLE));
	EVUTIL_ASSERT(!(chain->flags & EVBUFFER_MEM_PINNED_ANY));
	memmove(chain->buffer, chain->buffer + chain->misalign, chain->off);
	chain->misalign = 0;
}
#define MAX_TO_COPY_IN_EXPAND 4096
#define MAX_TO_REALIGN_IN_EXPAND 2048

/** Helper: return true iff we should realign chain to fit datalen bytes of
    data in it. */
static int
evbuffer_chain_should_realign(struct evbuffer_chain *chain,
    size_t datlen)
{
	return chain->buffer_len - chain->off >= datlen &&
	    (chain->off < chain->buffer_len / 2) &&
	    (chain->off <= MAX_TO_REALIGN_IN_EXPAND);
}
/* Expands the available space in the event buffer to at least datlen, all in
 * a single chunk. Return that chunk. */
static struct evbuffer_chain *
evbuffer_expand_singlechain(struct evbuffer *buf, size_t datlen)
{
	struct evbuffer_chain *chain, **chainp;
	struct evbuffer_chain *result = NULL;
	ASSERT_EVBUFFER_LOCKED(buf);

	chainp = buf->last_with_datap;

	/* XXX If *chainp is no longer writeable, but has enough space in its
	 * misalign, this might be a bad idea: we could still use *chainp, not
	 * (*chainp)->next. */
	if (*chainp && CHAIN_SPACE_LEN(*chainp) == 0)
		chainp = &(*chainp)->next;

	/* 'chain' now points to the first chain with writable space (if any)
	 * We will either use it, realign it, replace it, or resize it. */
	chain = *chainp;

	if (chain == NULL ||
	    (chain->flags & (EVBUFFER_IMMUTABLE|EVBUFFER_MEM_PINNED_ANY))) {
		/* We can't use the last_with_data chain at all.  Just add a
		 * new one that's big enough. */
		goto insert_new;
	}

	/* If we can fit all the data, then we don't have to do anything */
	if (CHAIN_SPACE_LEN(chain) >= datlen) {
		result = chain;
		goto ok;
	}

	/* If the chain is completely empty, just replace it by adding a new
	 * empty chain. */
	if (chain->off == 0) {
		goto insert_new;
	}

	/* If the misalignment plus the remaining space fulfills our data
	 * needs, we could just force an alignment to happen.  Afterwards, we
	 * have enough space.  But only do this if we're saving a lot of space
	 * and not moving too much data.  Otherwise the space savings are
	 * probably offset by the time lost in copying.
	 */
	if (evbuffer_chain_should_realign(chain, datlen)) {
		evbuffer_chain_align(chain);
		result = chain;
		goto ok;
	}

	/* At this point, we can either resize the last chunk with space in
	 * it, use the next chunk after it, or add a new chunk.  If we add a
	 * new chunk, we waste CHAIN_SPACE_LEN(chain) bytes in the former last
	 * chunk.  If we resize, we have to copy chain->off bytes.
	 */

	/* Would expanding this chunk be affordable and worthwhile? */
	if (CHAIN_SPACE_LEN(chain) < chain->buffer_len / 8 ||
	    chain->off > MAX_TO_COPY_IN_EXPAND) {
		/* It's not worth resizing this chain. Can the next one be
		 * used? */
		if (chain->next && CHAIN_SPACE_LEN(chain->next) >= datlen) {
			/* Yes, we can just use the next chain (which should
			 * be empty.) */
			result = chain->next;
			goto ok;
		} else {
			/* No; append a new chain (which will free all
			 * terminal empty chains.) */
			goto insert_new;
		}
	} else {
		/* Okay, we're going to try to resize this chain: Not doing so
		 * would waste at least 1/8 of its current allocation, and we
		 * can do so without having to copy more than
		 * MAX_TO_COPY_IN_EXPAND bytes. */
		/* figure out how much space we need */
		size_t length = chain->off + datlen;
		struct evbuffer_chain *tmp = evbuffer_chain_new(length);
		if (tmp == NULL)
			goto err;

		/* copy the data over that we had so far */
		tmp->off = chain->off;
		memcpy(tmp->buffer, chain->buffer + chain->misalign,
		    chain->off);
		/* fix up the list */
		EVUTIL_ASSERT(*chainp == chain);
		result = *chainp = tmp;

		if (buf->last == chain)
			buf->last = tmp;

		tmp->next = chain->next;
		evbuffer_chain_free(chain);
		goto ok;
	}

insert_new:
	result = evbuffer_chain_insert_new(buf, datlen);
	if (!result)
		goto err;
ok:
	EVUTIL_ASSERT(result);
	EVUTIL_ASSERT(CHAIN_SPACE_LEN(result) >= datlen);
err:
	return result;
}
/* Make sure that datlen bytes are available for writing in the last n
 * chains.  Never copies or moves data. */
int
_evbuffer_expand_fast(struct evbuffer *buf, size_t datlen, int n)
{
	struct evbuffer_chain *chain = buf->last, *tmp, *next;
	size_t avail;
	int used;

	ASSERT_EVBUFFER_LOCKED(buf);
	EVUTIL_ASSERT(n >= 2);

	if (chain == NULL || (chain->flags & EVBUFFER_IMMUTABLE)) {
		/* There is no last chunk, or we can't touch the last chunk.
		 * Just add a new chunk. */
		chain = evbuffer_chain_new(datlen);
		if (chain == NULL)
			return (-1);

		evbuffer_chain_insert(buf, chain);
		return (0);
	}

	used = 0; /* number of chains we're using space in. */
	avail = 0; /* how much space they have. */
	/* How many bytes can we stick at the end of buffer as it is?  Iterate
	 * over the chains at the end of the buffer, trying to see how much
	 * space we have in the first n. */
	for (chain = *buf->last_with_datap; chain; chain = chain->next) {
		if (chain->off) {
			size_t space = (size_t) CHAIN_SPACE_LEN(chain);
			EVUTIL_ASSERT(chain == *buf->last_with_datap);
			if (space) {
				avail += space;
				++used;
			}
		} else {
			/* No data in chain; realign it. */
			chain->misalign = 0;
			avail += chain->buffer_len;
			++used;
		}
		if (avail >= datlen) {
			/* There is already enough space.  Just return */
			return (0);
		}
		if (used == n)
			break;
	}

	/* There wasn't enough space in the first n chains with space in
	 * them. Either add a new chain with enough space, or replace all
	 * empty chains with one that has enough space, depending on n. */
	if (used < n) {
		/* The loop ran off the end of the chains before it hit n
		 * chains; we can add another. */
		EVUTIL_ASSERT(chain == NULL);

		tmp = evbuffer_chain_new(datlen - avail);
		if (tmp == NULL)
			return (-1);

		buf->last->next = tmp;
		buf->last = tmp;
		/* (we would only set last_with_data if we added the first
		 * chain. But if the buffer had no chains, we would have
		 * just allocated a new chain earlier) */
		return (0);
	} else {
		/* Nuke _all_ the empty chains. */
		int rmv_all = 0; /* True iff we removed last_with_data. */
		chain = *buf->last_with_datap;
		if (!chain->off) {
			EVUTIL_ASSERT(chain == buf->first);
			rmv_all = 1;
			avail = 0;
		} else {
			avail = (size_t) CHAIN_SPACE_LEN(chain);
			chain = chain->next;
		}

		for (; chain; chain = next) {
			next = chain->next;
			EVUTIL_ASSERT(chain->off == 0);
			evbuffer_chain_free(chain);
		}
		tmp = evbuffer_chain_new(datlen - avail);
		if (tmp == NULL) {
			if (rmv_all) {
				ZERO_CHAIN(buf);
			} else {
				buf->last = *buf->last_with_datap;
				(*buf->last_with_datap)->next = NULL;
			}
			return (-1);
		}

		if (rmv_all) {
			buf->first = buf->last = tmp;
			buf->last_with_datap = &buf->first;
		} else {
			(*buf->last_with_datap)->next = tmp;
			buf->last = tmp;
		}
		return (0);
	}
}
int
evbuffer_expand(struct evbuffer *buf, size_t datlen)
{
	struct evbuffer_chain *chain;

	EVBUFFER_LOCK(buf);
	chain = evbuffer_expand_singlechain(buf, datlen);
	EVBUFFER_UNLOCK(buf);
	return chain ? 0 : -1;
}
/*
 * Reads data from a file descriptor into a buffer.
 */

#if defined(_EVENT_HAVE_SYS_UIO_H) || defined(WIN32)
#define USE_IOVEC_IMPL
#endif

#ifdef USE_IOVEC_IMPL

#ifdef _EVENT_HAVE_SYS_UIO_H
/* number of iovec we use for writev, fragmentation is going to determine
 * how much we end up writing */

#define DEFAULT_WRITE_IOVEC 128

#if defined(UIO_MAXIOV) && UIO_MAXIOV < DEFAULT_WRITE_IOVEC
#define NUM_WRITE_IOVEC UIO_MAXIOV
#elif defined(IOV_MAX) && IOV_MAX < DEFAULT_WRITE_IOVEC
#define NUM_WRITE_IOVEC IOV_MAX
#else
#define NUM_WRITE_IOVEC DEFAULT_WRITE_IOVEC
#endif

#define IOV_TYPE struct iovec
#define IOV_PTR_FIELD iov_base
#define IOV_LEN_FIELD iov_len
#define IOV_LEN_TYPE size_t
#else
#define NUM_WRITE_IOVEC 16
#define IOV_TYPE WSABUF
#define IOV_PTR_FIELD buf
#define IOV_LEN_FIELD len
#define IOV_LEN_TYPE unsigned long
#endif

#define NUM_READ_IOVEC 4
#endif

#define EVBUFFER_MAX_READ	4096
/** Helper function to figure out which space to use for reading data into
    an evbuffer.  Internal use only.

    @param buf The buffer to read into
    @param howmuch How much we want to read.
    @param vecs An array of two or more iovecs or WSABUFs.
    @param n_vecs_avail The length of vecs
    @param chainp A pointer to a variable to hold the first chain we're
      reading into.
    @param exact Boolean: if true, we do not provide more than 'howmuch'
      space in the vectors, even if more space is available.
    @return The number of buffers we're using.
 */
int
_evbuffer_read_setup_vecs(struct evbuffer *buf, ev_ssize_t howmuch,
    struct evbuffer_iovec *vecs, int n_vecs_avail,
    struct evbuffer_chain ***chainp, int exact)
{
	struct evbuffer_chain *chain;
	struct evbuffer_chain **firstchainp;
	size_t so_far;
	int i;
	ASSERT_EVBUFFER_LOCKED(buf);

	if (howmuch < 0)
		return -1;

	so_far = 0;
	/* Let firstchain be the first chain with any space on it */
	firstchainp = buf->last_with_datap;
	if (CHAIN_SPACE_LEN(*firstchainp) == 0) {
		firstchainp = &(*firstchainp)->next;
	}

	chain = *firstchainp;
	for (i = 0; i < n_vecs_avail && so_far < (size_t)howmuch; ++i) {
		size_t avail = (size_t) CHAIN_SPACE_LEN(chain);
		if (avail > (howmuch - so_far) && exact)
			avail = howmuch - so_far;
		vecs[i].iov_base = CHAIN_SPACE_PTR(chain);
		vecs[i].iov_len = avail;
		so_far += avail;
		chain = chain->next;
	}

	*chainp = firstchainp;
	return i;
}
static int
get_n_bytes_readable_on_socket(evutil_socket_t fd)
{
#if defined(FIONREAD) && defined(WIN32)
	unsigned long lng = EVBUFFER_MAX_READ;
	if (ioctlsocket(fd, FIONREAD, &lng) < 0)
		return -1;
	return (int)lng;
#elif defined(FIONREAD)
	int n = EVBUFFER_MAX_READ;
	if (ioctl(fd, FIONREAD, &n) < 0)
		return -1;
	return n;
#else
	return EVBUFFER_MAX_READ;
#endif
}
/* TODO(niels): should this function return ev_ssize_t and take ev_ssize_t
 * as howmuch? */
int
evbuffer_read(struct evbuffer *buf, evutil_socket_t fd, int howmuch)
{
	struct evbuffer_chain **chainp;
	int n;
	int result;

#ifdef USE_IOVEC_IMPL
	int nvecs, i, remaining;
#else
	struct evbuffer_chain *chain;
	unsigned char *p;
#endif

	EVBUFFER_LOCK(buf);

	if (buf->freeze_end) {
		result = -1;
		goto done;
	}

	n = get_n_bytes_readable_on_socket(fd);
	if (n <= 0 || n > EVBUFFER_MAX_READ)
		n = EVBUFFER_MAX_READ;
	if (howmuch < 0 || howmuch > n)
		howmuch = n;

#ifdef USE_IOVEC_IMPL
	/* Since we can use iovecs, we're willing to use the last
	 * NUM_READ_IOVEC chains. */
	if (_evbuffer_expand_fast(buf, howmuch, NUM_READ_IOVEC) == -1) {
		result = -1;
		goto done;
	} else {
		IOV_TYPE vecs[NUM_READ_IOVEC];
#ifdef _EVBUFFER_IOVEC_IS_NATIVE
		nvecs = _evbuffer_read_setup_vecs(buf, howmuch, vecs,
		    NUM_READ_IOVEC, &chainp, 1);
#else
		/* We aren't using the native struct iovec.  Therefore,
		   we are on win32. */
		struct evbuffer_iovec ev_vecs[NUM_READ_IOVEC];
		nvecs = _evbuffer_read_setup_vecs(buf, howmuch, ev_vecs, 2,
		    &chainp, 1);

		for (i=0; i < nvecs; ++i)
			WSABUF_FROM_EVBUFFER_IOV(&vecs[i], &ev_vecs[i]);
#endif

#ifdef WIN32
		{
			DWORD bytesRead;
			DWORD flags = 0;
			if (WSARecv(fd, vecs, nvecs, &bytesRead, &flags, NULL, NULL)) {
				/* The read failed. It might be a close,
				 * or it might be an error. */
				if (WSAGetLastError() == WSAECONNABORTED)
					n = 0;
				else
					n = -1;
			} else
				n = bytesRead;
		}
#else
		n = readv(fd, vecs, nvecs);
#endif
	}

#else /*!USE_IOVEC_IMPL*/
	/* If we don't have FIONREAD, we might waste some space here */
	/* XXX we _will_ waste some space here if there is any space left
	 * over on buf->last. */
	if ((chain = evbuffer_expand_singlechain(buf, howmuch)) == NULL) {
		result = -1;
		goto done;
	}

	/* We can append new data at this point */
	p = chain->buffer + chain->misalign + chain->off;

#ifndef WIN32
	n = read(fd, p, howmuch);
#else
	n = recv(fd, p, howmuch, 0);
#endif
#endif /* USE_IOVEC_IMPL */

	if (n == -1) {
		result = -1;
		goto done;
	}
	if (n == 0) {
		result = 0;
		goto done;
	}

#ifdef USE_IOVEC_IMPL
	remaining = n;
	for (i=0; i < nvecs; ++i) {
		ev_ssize_t space = (ev_ssize_t) CHAIN_SPACE_LEN(*chainp);
		if (space < remaining) {
			(*chainp)->off += space;
			remaining -= (int)space;
		} else {
			(*chainp)->off += remaining;
			buf->last_with_datap = chainp;
			break;
		}
		chainp = &(*chainp)->next;
	}
#else
	chain->off += n;
	advance_last_with_data(buf);
#endif
	buf->total_len += n;
	buf->n_add_for_cb += n;

	/* Tell someone about changes in this buffer */
	evbuffer_invoke_callbacks(buf);
	result = n;
done:
	EVBUFFER_UNLOCK(buf);
	return result;
}
#ifdef WIN32
static int
evbuffer_readfile(struct evbuffer *buf, evutil_socket_t fd, ev_ssize_t howmuch)
{
	int result;
	int nchains, n;
	struct evbuffer_iovec v[2];

	EVBUFFER_LOCK(buf);

	if (buf->freeze_end) {
		result = -1;
		goto done;
	}

	if (howmuch < 0)
		howmuch = 16384;

	/* XXX we _will_ waste some space here if there is any space left
	 * over on buf->last. */
	nchains = evbuffer_reserve_space(buf, howmuch, v, 2);
	if (nchains < 1 || nchains > 2) {
		result = -1;
		goto done;
	}
	n = read((int)fd, v[0].iov_base, (unsigned int)v[0].iov_len);
	if (n <= 0) {
		result = n;
		goto done;
	}
	v[0].iov_len = (IOV_LEN_TYPE) n; /* XXXX another problem with big n.*/
	if (nchains > 1) {
		n = read((int)fd, v[1].iov_base, (unsigned int)v[1].iov_len);
		if (n <= 0) {
			result = (unsigned long) v[0].iov_len;
			evbuffer_commit_space(buf, v, 1);
			goto done;
		}
		v[1].iov_len = n;
	}
	evbuffer_commit_space(buf, v, nchains);
	result = (int) v[0].iov_len;
	if (nchains > 1)
		result += (int) v[1].iov_len;

done:
	EVBUFFER_UNLOCK(buf);
	return result;
}
#endif
#ifdef USE_IOVEC_IMPL
static inline int
evbuffer_write_iovec(struct evbuffer *buffer, evutil_socket_t fd,
    ev_ssize_t howmuch)
{
	IOV_TYPE iov[NUM_WRITE_IOVEC];
	struct evbuffer_chain *chain = buffer->first;
	int n, i = 0;

	if (howmuch < 0)
		return -1;

	ASSERT_EVBUFFER_LOCKED(buffer);
	/* XXX make this top out at some maximal data length?  if the
	 * buffer has (say) 1MB in it, split over 128 chains, there's
	 * no way it all gets written in one go. */
	while (chain != NULL && i < NUM_WRITE_IOVEC && howmuch) {
#ifdef USE_SENDFILE
		/* we cannot write the file info via writev */
		if (chain->flags & EVBUFFER_SENDFILE)
			break;
#endif
		iov[i].IOV_PTR_FIELD = (void *) (chain->buffer + chain->misalign);
		if ((size_t)howmuch >= chain->off) {
			/* XXXcould be problematic when windows supports mmap*/
			iov[i++].IOV_LEN_FIELD = (IOV_LEN_TYPE)chain->off;
			howmuch -= chain->off;
		} else {
			/* XXXcould be problematic when windows supports mmap*/
			iov[i++].IOV_LEN_FIELD = (IOV_LEN_TYPE)howmuch;
			break;
		}
		chain = chain->next;
	}
	if (! i)
		return 0;
#ifdef WIN32
	{
		DWORD bytesSent;
		if (WSASend(fd, iov, i, &bytesSent, 0, NULL, NULL))
			n = -1;
		else
			n = bytesSent;
	}
#else
	n = writev(fd, iov, i);
#endif
	return (n);
}
#endif
#ifdef USE_SENDFILE
static inline int
evbuffer_write_sendfile(struct evbuffer *buffer, evutil_socket_t fd,
    ev_ssize_t howmuch)
{
	struct evbuffer_chain *chain = buffer->first;
	struct evbuffer_chain_fd *info =
	    EVBUFFER_CHAIN_EXTRA(struct evbuffer_chain_fd, chain);
#if defined(SENDFILE_IS_MACOSX) || defined(SENDFILE_IS_FREEBSD)
	int res;
	off_t len = chain->off;
#elif defined(SENDFILE_IS_LINUX) || defined(SENDFILE_IS_SOLARIS)
	ev_ssize_t res;
	off_t offset = chain->misalign;
#endif

	ASSERT_EVBUFFER_LOCKED(buffer);

#if defined(SENDFILE_IS_MACOSX)
	res = sendfile(info->fd, fd, chain->misalign, &len, NULL, 0);
	if (res == -1 && !EVUTIL_ERR_RW_RETRIABLE(errno))
		return (-1);

	return (len);
#elif defined(SENDFILE_IS_FREEBSD)
	res = sendfile(info->fd, fd, chain->misalign, chain->off, NULL, &len, 0);
	if (res == -1 && !EVUTIL_ERR_RW_RETRIABLE(errno))
		return (-1);

	return (len);
#elif defined(SENDFILE_IS_LINUX)
	/* TODO(niels): implement splice */
	res = sendfile(fd, info->fd, &offset, chain->off);
	if (res == -1 && EVUTIL_ERR_RW_RETRIABLE(errno)) {
		/* if this is EAGAIN or EINTR return 0; otherwise, -1 */
		return (0);
	}
	return (res);
#elif defined(SENDFILE_IS_SOLARIS)
	{
		const off_t offset_orig = offset;
		res = sendfile(fd, info->fd, &offset, chain->off);
		if (res == -1 && EVUTIL_ERR_RW_RETRIABLE(errno)) {
			if (offset - offset_orig)
				return offset - offset_orig;
			/* if this is EAGAIN or EINTR and no bytes were
			 * written, return 0 */
			return (0);
		}
		return (res);
	}
#endif
}
#endif
int
evbuffer_write_atmost(struct evbuffer *buffer, evutil_socket_t fd,
    ev_ssize_t howmuch)
{
	int n = -1;

	EVBUFFER_LOCK(buffer);

	if (buffer->freeze_start) {
		goto done;
	}

	if (howmuch < 0 || (size_t)howmuch > buffer->total_len)
		howmuch = buffer->total_len;

	if (howmuch > 0) {
#ifdef USE_SENDFILE
		struct evbuffer_chain *chain = buffer->first;
		if (chain != NULL && (chain->flags & EVBUFFER_SENDFILE))
			n = evbuffer_write_sendfile(buffer, fd, howmuch);
		else {
#endif
#ifdef USE_IOVEC_IMPL
		n = evbuffer_write_iovec(buffer, fd, howmuch);
#elif defined(WIN32)
		/* XXX(nickm) Don't disable this code until we know if
		 * the WSARecv code above works. */
		void *p = evbuffer_pullup(buffer, howmuch);
		n = send(fd, p, howmuch, 0);
#else
		void *p = evbuffer_pullup(buffer, howmuch);
		n = write(fd, p, howmuch);
#endif
#ifdef USE_SENDFILE
		}
#endif
	}

	if (n > 0)
		evbuffer_drain(buffer, n);

done:
	EVBUFFER_UNLOCK(buffer);
	return (n);
}

int
evbuffer_write(struct evbuffer *buffer, evutil_socket_t fd)
{
	return evbuffer_write_atmost(buffer, fd, -1);
}
unsigned char *
evbuffer_find(struct evbuffer *buffer, const unsigned char *what, size_t len)
{
	unsigned char *search;
	struct evbuffer_ptr ptr;

	EVBUFFER_LOCK(buffer);

	ptr = evbuffer_search(buffer, (const char *)what, len, NULL);
	if (ptr.pos < 0) {
		search = NULL;
	} else {
		search = evbuffer_pullup(buffer, ptr.pos + len);
		if (search)
			search += ptr.pos;
	}
	EVBUFFER_UNLOCK(buffer);
	return search;
}
int
evbuffer_ptr_set(struct evbuffer *buf, struct evbuffer_ptr *pos,
    size_t position, enum evbuffer_ptr_how how)
{
	size_t left = position;
	struct evbuffer_chain *chain = NULL;

	EVBUFFER_LOCK(buf);

	switch (how) {
	case EVBUFFER_PTR_SET:
		chain = buf->first;
		pos->pos = position;
		position = 0;
		break;
	case EVBUFFER_PTR_ADD:
		/* this avoids iterating over all previous chains if
		   we just want to advance the position */
		chain = pos->_internal.chain;
		pos->pos += position;
		position = pos->_internal.pos_in_chain;
		break;
	}

	while (chain && position + left >= chain->off) {
		left -= chain->off - position;
		chain = chain->next;
		position = 0;
	}
	if (chain) {
		pos->_internal.chain = chain;
		pos->_internal.pos_in_chain = position + left;
	} else {
		pos->_internal.chain = NULL;
		pos->pos = -1;
	}

	EVBUFFER_UNLOCK(buf);

	return chain != NULL ? 0 : -1;
}
/**
   Compare the bytes in buf at position pos to the len bytes in mem.  Return
   less than 0, 0, or greater than 0 as memcmp.
 */
static int
evbuffer_ptr_memcmp(const struct evbuffer *buf, const struct evbuffer_ptr *pos,
    const char *mem, size_t len)
{
	struct evbuffer_chain *chain;
	size_t position;
	int r;

	ASSERT_EVBUFFER_LOCKED(buf);

	if (pos->pos + len > buf->total_len)
		return -1;

	chain = pos->_internal.chain;
	position = pos->_internal.pos_in_chain;
	while (len && chain) {
		size_t n_comparable;
		if (len + position > chain->off)
			n_comparable = chain->off - position;
		else
			n_comparable = len;
		r = memcmp(chain->buffer + chain->misalign + position, mem,
		    n_comparable);
		if (r)
			return r;
		mem += n_comparable;
		len -= n_comparable;
		position = 0;
		chain = chain->next;
	}

	return 0;
}
struct evbuffer_ptr
evbuffer_search(struct evbuffer *buffer, const char *what, size_t len, const struct evbuffer_ptr *start)
{
	return evbuffer_search_range(buffer, what, len, start, NULL);
}

struct evbuffer_ptr
evbuffer_search_range(struct evbuffer *buffer, const char *what, size_t len, const struct evbuffer_ptr *start, const struct evbuffer_ptr *end)
{
	struct evbuffer_ptr pos;
	struct evbuffer_chain *chain, *last_chain = NULL;
	const unsigned char *p;
	char first;

	EVBUFFER_LOCK(buffer);

	if (start) {
		memcpy(&pos, start, sizeof(pos));
		chain = pos._internal.chain;
	} else {
		pos.pos = 0;
		chain = pos._internal.chain = buffer->first;
		pos._internal.pos_in_chain = 0;
	}

	if (end)
		last_chain = end->_internal.chain;

	if (!len || len > EV_SSIZE_MAX)
		goto done;

	first = what[0];

	while (chain) {
		const unsigned char *start_at =
		    chain->buffer + chain->misalign +
		    pos._internal.pos_in_chain;
		p = memchr(start_at, first,
		    chain->off - pos._internal.pos_in_chain);
		if (p) {
			pos.pos += p - start_at;
			pos._internal.pos_in_chain += p - start_at;
			if (!evbuffer_ptr_memcmp(buffer, &pos, what, len)) {
				if (end && pos.pos + (ev_ssize_t)len > end->pos)
					goto not_found;
				else
					goto done;
			}
			++pos.pos;
			++pos._internal.pos_in_chain;
			if (pos._internal.pos_in_chain == chain->off) {
				chain = pos._internal.chain = chain->next;
				pos._internal.pos_in_chain = 0;
			}
		} else {
			if (chain == last_chain)
				goto not_found;
			pos.pos += chain->off - pos._internal.pos_in_chain;
			chain = pos._internal.chain = chain->next;
			pos._internal.pos_in_chain = 0;
		}
	}

not_found:
	pos.pos = -1;
	pos._internal.chain = NULL;
done:
	EVBUFFER_UNLOCK(buffer);
	return pos;
}
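
/*
 * Illustrative sketch (not part of the original file): finding a delimiter
 * that may span chain boundaries.  A negative `pos` means "not found".
 *
 *	struct evbuffer_ptr p = evbuffer_search(buf, "\r\n\r\n", 4, NULL);
 *	if (p.pos >= 0) {
 *		// the header is the first p.pos bytes of buf
 *	}
 */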
int
evbuffer_peek(struct evbuffer *buffer, ev_ssize_t len,
    struct evbuffer_ptr *start_at,
    struct evbuffer_iovec *vec, int n_vec)
{
	struct evbuffer_chain *chain;
	int idx = 0;
	ev_ssize_t len_so_far = 0;

	EVBUFFER_LOCK(buffer);

	if (start_at) {
		chain = start_at->_internal.chain;
		len_so_far = chain->off
		    - start_at->_internal.pos_in_chain;
		idx = 1;
		if (n_vec > 0) {
			vec[0].iov_base = chain->buffer + chain->misalign
			    + start_at->_internal.pos_in_chain;
			vec[0].iov_len = len_so_far;
		}
		chain = chain->next;
	} else {
		chain = buffer->first;
	}

	if (n_vec == 0 && len < 0) {
		/* If no vectors are provided and they asked for "everything",
		 * pretend they asked for the actual available amount. */
		len = buffer->total_len - len_so_far;
	}

	while (chain) {
		if (len >= 0 && len_so_far >= len)
			break;
		if (idx<n_vec) {
			vec[idx].iov_base = chain->buffer + chain->misalign;
			vec[idx].iov_len = chain->off;
		} else if (len<0) {
			break;
		}
		++idx;
		len_so_far += chain->off;
		chain = chain->next;
	}

	EVBUFFER_UNLOCK(buffer);

	return idx;
}
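
/*
 * Illustrative sketch (not part of the original file): inspecting buffered
 * data in place with evbuffer_peek(), without copying or draining.  Calling
 * it once with no vectors reports how many are needed.
 *
 *	int n = evbuffer_peek(buf, -1, NULL, NULL, 0);
 *	struct evbuffer_iovec *v = malloc(n * sizeof(*v));
 *	n = evbuffer_peek(buf, -1, NULL, v, n);
 *	// v[0..n-1] now point directly at the buffer's contents
 *	free(v);
 */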
int
evbuffer_add_vprintf(struct evbuffer *buf, const char *fmt, va_list ap)
{
	char *buffer;
	size_t space;
	int sz, result = -1;
	va_list aq;
	struct evbuffer_chain *chain;

	EVBUFFER_LOCK(buf);

	if (buf->freeze_end) {
		goto done;
	}

	/* make sure that at least some space is available */
	if ((chain = evbuffer_expand_singlechain(buf, 64)) == NULL)
		goto done;

	for (;;) {
#if 0
		size_t used = chain->misalign + chain->off;
		buffer = (char *)chain->buffer + chain->misalign + chain->off;
		EVUTIL_ASSERT(chain->buffer_len >= used);
		space = chain->buffer_len - used;
#endif
		buffer = (char*) CHAIN_SPACE_PTR(chain);
		space = (size_t) CHAIN_SPACE_LEN(chain);

#ifndef va_copy
#define	va_copy(dst, src)	memcpy(&(dst), &(src), sizeof(va_list))
#endif
		va_copy(aq, ap);

		sz = evutil_vsnprintf(buffer, space, fmt, aq);

		va_end(aq);

		if (sz < 0)
			goto done;
		if ((size_t)sz < space) {
			chain->off += sz;
			buf->total_len += sz;
			buf->n_add_for_cb += sz;

			advance_last_with_data(buf);
			evbuffer_invoke_callbacks(buf);
			result = sz;
			goto done;
		}
		if ((chain = evbuffer_expand_singlechain(buf, sz + 1)) == NULL)
			goto done;
	}
	/* NOTREACHED */

done:
	EVBUFFER_UNLOCK(buf);
	return result;
}

int
evbuffer_add_printf(struct evbuffer *buf, const char *fmt, ...)
{
	int res = -1;
	va_list ap;

	va_start(ap, fmt);
	res = evbuffer_add_vprintf(buf, fmt, ap);
	va_end(ap);

	return (res);
}

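/*
 * Illustrative sketch, not part of the original source: appending formatted
 * output directly to an evbuffer.  The request path is invented.
 *
 *	evbuffer_add_printf(buf, "GET %s HTTP/1.0\r\n\r\n", "/index.html");
 */
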
int
evbuffer_add_reference(struct evbuffer *outbuf,
    const void *data, size_t datlen,
    evbuffer_ref_cleanup_cb cleanupfn, void *extra)
{
	struct evbuffer_chain *chain;
	struct evbuffer_chain_reference *info;
	int result = -1;

	chain = evbuffer_chain_new(sizeof(struct evbuffer_chain_reference));
	if (!chain)
		return (-1);
	chain->flags |= EVBUFFER_REFERENCE | EVBUFFER_IMMUTABLE;
	chain->buffer = (u_char *)data;
	chain->buffer_len = datlen;
	chain->off = datlen;

	info = EVBUFFER_CHAIN_EXTRA(struct evbuffer_chain_reference, chain);
	info->cleanupfn = cleanupfn;
	info->extra = extra;

	EVBUFFER_LOCK(outbuf);
	if (outbuf->freeze_end) {
		/* don't call chain_free; we do not want to actually invoke
		 * the cleanup function */
		mm_free(chain);
		goto done;
	}
	evbuffer_chain_insert(outbuf, chain);
	outbuf->n_add_for_cb += datlen;

	evbuffer_invoke_callbacks(outbuf);

	result = 0;
done:
	EVBUFFER_UNLOCK(outbuf);

	return result;
}

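/*
 * Illustrative sketch, not part of the original source: adding static data
 * by reference.  The callback name is invented; since the data is static,
 * the cleanup function has nothing to free.
 *
 *	static void ref_cleanup(const void *data, size_t len, void *extra)
 *	{
 *		(void)data; (void)len; (void)extra;
 *	}
 *	...
 *	static const char msg[] = "hello world";
 *	evbuffer_add_reference(buf, msg, sizeof(msg) - 1, ref_cleanup, NULL);
 */
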
/* TODO(niels): maybe we don't want to own the fd, however, in that
 * case, we should dup it - dup is cheap.  Perhaps, we should use a
 * callback instead?
 */
/* TODO(niels): we may want to automagically convert to mmap, in
 * case evbuffer_remove() or evbuffer_pullup() are being used.
 */
int
evbuffer_add_file(struct evbuffer *outbuf, int fd,
    ev_off_t offset, ev_off_t length)
{
#if defined(USE_SENDFILE) || defined(_EVENT_HAVE_MMAP)
	struct evbuffer_chain *chain;
	struct evbuffer_chain_fd *info;
#endif
#if defined(USE_SENDFILE)
	int sendfile_okay = 1;
#endif
	int ok = 1;

#if defined(USE_SENDFILE)
	if (use_sendfile) {
		EVBUFFER_LOCK(outbuf);
		sendfile_okay = outbuf->flags & EVBUFFER_FLAG_DRAINS_TO_FD;
		EVBUFFER_UNLOCK(outbuf);
	}

	if (use_sendfile && sendfile_okay) {
		chain = evbuffer_chain_new(sizeof(struct evbuffer_chain_fd));
		if (chain == NULL) {
			event_warn("%s: out of memory", __func__);
			return (-1);
		}

		chain->flags |= EVBUFFER_SENDFILE | EVBUFFER_IMMUTABLE;
		chain->buffer = NULL;	/* no reading possible */
		chain->buffer_len = length + offset;
		chain->off = length;
		chain->misalign = offset;

		info = EVBUFFER_CHAIN_EXTRA(struct evbuffer_chain_fd, chain);
		info->fd = fd;

		EVBUFFER_LOCK(outbuf);
		if (outbuf->freeze_end) {
			mm_free(chain);
			ok = 0;
		} else {
			outbuf->n_add_for_cb += length;
			evbuffer_chain_insert(outbuf, chain);
		}
	} else
#endif
#if defined(_EVENT_HAVE_MMAP)
	if (use_mmap) {
		void *mapped = mmap(NULL, length + offset, PROT_READ,
#ifdef MAP_NOCACHE
		    MAP_NOCACHE |
#endif
#ifdef MAP_FILE
		    MAP_FILE |
#endif
		    MAP_SHARED,
		    fd, 0);
		/* Some mmap implementations require offset to be a multiple
		 * of the page size.  Most users of this API are likely to
		 * use 0, so mapping everything is not likely to be a problem.
		 * TODO(niels): determine page size and round offset to that
		 * page size to avoid mapping too much memory.
		 */
		if (mapped == MAP_FAILED) {
			event_warn("%s: mmap(%d, %d, %zu) failed",
			    __func__, fd, 0, (size_t)(offset + length));
			return (-1);
		}
		chain = evbuffer_chain_new(sizeof(struct evbuffer_chain_fd));
		if (chain == NULL) {
			event_warn("%s: out of memory", __func__);
			munmap(mapped, length);
			return (-1);
		}

		chain->flags |= EVBUFFER_MMAP | EVBUFFER_IMMUTABLE;
		chain->buffer = mapped;
		chain->buffer_len = length + offset;
		chain->off = length + offset;

		info = EVBUFFER_CHAIN_EXTRA(struct evbuffer_chain_fd, chain);
		info->fd = fd;

		EVBUFFER_LOCK(outbuf);
		if (outbuf->freeze_end) {
			info->fd = -1;
			evbuffer_chain_free(chain);
			ok = 0;
		} else {
			outbuf->n_add_for_cb += length;

			evbuffer_chain_insert(outbuf, chain);

			/* we need to subtract whatever we don't need */
			evbuffer_drain(outbuf, offset);
		}
	} else
#endif
	{
		/* the default implementation */
		struct evbuffer *tmp = evbuffer_new();
		ev_ssize_t read;

		if (tmp == NULL)
			return (-1);

#ifdef WIN32
#define lseek _lseeki64
#endif
		if (lseek(fd, offset, SEEK_SET) == -1) {
			evbuffer_free(tmp);
			return (-1);
		}

		/* we add everything to a temporary buffer, so that we
		 * can abort without side effects if the read fails.
		 */
		while (length) {
			read = evbuffer_readfile(tmp, fd, (ev_ssize_t)length);
			if (read == -1) {
				evbuffer_free(tmp);
				return (-1);
			}

			length -= read;
		}

		EVBUFFER_LOCK(outbuf);
		if (outbuf->freeze_end) {
			evbuffer_free(tmp);
			ok = 0;
		} else {
			evbuffer_add_buffer(outbuf, tmp);
			evbuffer_free(tmp);

#ifdef WIN32
#define close _close
#endif
			close(fd);
		}
	}

	if (ok)
		evbuffer_invoke_callbacks(outbuf);
	EVBUFFER_UNLOCK(outbuf);

	return ok ? 0 : -1;
}

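/*
 * Illustrative sketch, not part of the original source: handing a whole
 * file to an evbuffer.  The file name is invented; on success the evbuffer
 * owns fd and will close it when the data is no longer needed.
 *
 *	struct stat st;
 *	int fd = open("index.html", O_RDONLY);
 *	if (fd >= 0 && fstat(fd, &st) == 0 &&
 *	    evbuffer_add_file(out, fd, 0, st.st_size) == 0) {
 *		... fd now belongs to the buffer ...
 *	}
 */
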
void
evbuffer_setcb(struct evbuffer *buffer, evbuffer_cb cb, void *cbarg)
{
	EVBUFFER_LOCK(buffer);

	if (!TAILQ_EMPTY(&buffer->callbacks))
		evbuffer_remove_all_callbacks(buffer);

	if (cb) {
		struct evbuffer_cb_entry *ent =
		    evbuffer_add_cb(buffer, NULL, cbarg);
		ent->cb.cb_obsolete = cb;
		ent->flags |= EVBUFFER_CB_OBSOLETE;
	}
	EVBUFFER_UNLOCK(buffer);
}

struct evbuffer_cb_entry *
evbuffer_add_cb(struct evbuffer *buffer, evbuffer_cb_func cb, void *cbarg)
{
	struct evbuffer_cb_entry *e;
	if (! (e = mm_calloc(1, sizeof(struct evbuffer_cb_entry))))
		return NULL;
	EVBUFFER_LOCK(buffer);
	e->cb.cb_func = cb;
	e->cbarg = cbarg;
	e->flags = EVBUFFER_CB_ENABLED;
	TAILQ_INSERT_HEAD(&buffer->callbacks, e, next);
	EVBUFFER_UNLOCK(buffer);
	return e;
}

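/*
 * Illustrative sketch, not part of the original source: a callback that
 * watches the buffer grow.  The callback name is invented.
 *
 *	static void on_change(struct evbuffer *b,
 *	    const struct evbuffer_cb_info *info, void *arg)
 *	{
 *		if (info->n_added > info->n_deleted)
 *			printf("buffer grew by %zu bytes\n",
 *			    (size_t)(info->n_added - info->n_deleted));
 *	}
 *	...
 *	evbuffer_add_cb(buf, on_change, NULL);
 */
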
int
evbuffer_remove_cb_entry(struct evbuffer *buffer,
    struct evbuffer_cb_entry *ent)
{
	EVBUFFER_LOCK(buffer);
	TAILQ_REMOVE(&buffer->callbacks, ent, next);
	EVBUFFER_UNLOCK(buffer);
	mm_free(ent);
	return 0;
}

int
evbuffer_remove_cb(struct evbuffer *buffer, evbuffer_cb_func cb, void *cbarg)
{
	struct evbuffer_cb_entry *cbent;
	int result = -1;
	EVBUFFER_LOCK(buffer);
	TAILQ_FOREACH(cbent, &buffer->callbacks, next) {
		if (cb == cbent->cb.cb_func && cbarg == cbent->cbarg) {
			result = evbuffer_remove_cb_entry(buffer, cbent);
			goto done;
		}
	}
done:
	EVBUFFER_UNLOCK(buffer);
	return result;
}

int
evbuffer_cb_set_flags(struct evbuffer *buffer,
    struct evbuffer_cb_entry *cb, ev_uint32_t flags)
{
	/* the user isn't allowed to mess with these. */
	flags &= ~EVBUFFER_CB_INTERNAL_FLAGS;
	EVBUFFER_LOCK(buffer);
	cb->flags |= flags;
	EVBUFFER_UNLOCK(buffer);
	return 0;
}

int
evbuffer_cb_clear_flags(struct evbuffer *buffer,
    struct evbuffer_cb_entry *cb, ev_uint32_t flags)
{
	/* the user isn't allowed to mess with these. */
	flags &= ~EVBUFFER_CB_INTERNAL_FLAGS;
	EVBUFFER_LOCK(buffer);
	cb->flags &= ~flags;
	EVBUFFER_UNLOCK(buffer);
	return 0;
}

int
evbuffer_freeze(struct evbuffer *buffer, int start)
{
	EVBUFFER_LOCK(buffer);
	if (start)
		buffer->freeze_start = 1;
	else
		buffer->freeze_end = 1;
	EVBUFFER_UNLOCK(buffer);
	return 0;
}

int
evbuffer_unfreeze(struct evbuffer *buffer, int start)
{
	EVBUFFER_LOCK(buffer);
	if (start)
		buffer->freeze_start = 0;
	else
		buffer->freeze_end = 0;
	EVBUFFER_UNLOCK(buffer);
	return 0;
}

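/*
 * Illustrative sketch, not part of the original source: freezing the front
 * (drain side) of a buffer so its contents cannot be removed while they
 * are being examined.
 *
 *	evbuffer_freeze(buf, 1);
 *	... inspect the data with evbuffer_peek() ...
 *	evbuffer_unfreeze(buf, 1);
 */
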
void
evbuffer_cb_suspend(struct evbuffer *buffer, struct evbuffer_cb_entry *cb)
{
	if (!(cb->flags & EVBUFFER_CB_SUSPENDED)) {
		cb->size_before_suspend = evbuffer_get_length(buffer);
		cb->flags |= EVBUFFER_CB_SUSPENDED;
	}
}

void
evbuffer_cb_unsuspend(struct evbuffer *buffer, struct evbuffer_cb_entry *cb)
{
	if ((cb->flags & EVBUFFER_CB_SUSPENDED)) {
		unsigned call = (cb->flags & EVBUFFER_CB_CALL_ON_UNSUSPEND);
		size_t sz = cb->size_before_suspend;
		cb->flags &= ~(EVBUFFER_CB_SUSPENDED |
		    EVBUFFER_CB_CALL_ON_UNSUSPEND);
		cb->size_before_suspend = 0;
		if (call && (cb->flags & EVBUFFER_CB_ENABLED)) {
			cb->cb(buffer, sz, evbuffer_get_length(buffer),
			    cb->cbarg);
		}
	}
}

/* These hooks are exposed so that the unit tests can temporarily disable
 * sendfile support in order to test mmap, or both to test linear
 * access.  Don't use them; if we need a way to disable sendfile support
 * in the future, it will probably be via an alternate version of
 * evbuffer_add_file() with a 'flags' argument.
 */
int _evbuffer_testing_use_sendfile(void);
int _evbuffer_testing_use_mmap(void);
int _evbuffer_testing_use_linear_file_access(void);

int
_evbuffer_testing_use_sendfile(void)
{
	int ok = 0;
#ifdef USE_SENDFILE
	use_sendfile = 1;
	ok = 1;
#endif
#ifdef _EVENT_HAVE_MMAP
	use_mmap = 0;
#endif
	return ok;
}
int
_evbuffer_testing_use_mmap(void)
{
	int ok = 0;
#ifdef USE_SENDFILE
	use_sendfile = 0;
#endif
#ifdef _EVENT_HAVE_MMAP
	use_mmap = 1;
	ok = 1;
#endif
	return ok;
}
int
_evbuffer_testing_use_linear_file_access(void)
{
#ifdef USE_SENDFILE
	use_sendfile = 0;
#endif
#ifdef _EVENT_HAVE_MMAP
	use_mmap = 0;
#endif
	return 1;
}