1 /* $NetBSD: buffer.c,v 1.3 2015/01/29 07:26:02 spz Exp $ */
3 * Copyright (c) 2002-2007 Niels Provos <provos@citi.umich.edu>
4 * Copyright (c) 2007-2012 Niels Provos and Nick Mathewson
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
9 * 1. Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
14 * 3. The name of the author may not be used to endorse or promote products
15 * derived from this software without specific prior written permission.
17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
18 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
19 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
20 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
21 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
22 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
26 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
29 #include "event2/event-config.h"
30 #include <sys/cdefs.h>
31 __RCSID("$NetBSD: buffer.c,v 1.3 2015/01/29 07:26:02 spz Exp $");
39 #ifdef _EVENT_HAVE_VASPRINTF
40 /* If we have vasprintf, we need to define this before we include stdio.h. */
44 #include <sys/types.h>
46 #ifdef _EVENT_HAVE_SYS_TIME_H
50 #ifdef _EVENT_HAVE_SYS_SOCKET_H
51 #include <sys/socket.h>
54 #ifdef _EVENT_HAVE_SYS_UIO_H
58 #ifdef _EVENT_HAVE_SYS_IOCTL_H
59 #include <sys/ioctl.h>
62 #ifdef _EVENT_HAVE_SYS_MMAN_H
66 #ifdef _EVENT_HAVE_SYS_SENDFILE_H
67 #include <sys/sendfile.h>
74 #ifdef _EVENT_HAVE_STDARG_H
77 #ifdef _EVENT_HAVE_UNISTD_H
82 #include "event2/event.h"
83 #include "event2/buffer.h"
84 #include "event2/buffer_compat.h"
85 #include "event2/bufferevent.h"
86 #include "event2/bufferevent_compat.h"
87 #include "event2/bufferevent_struct.h"
88 #include "event2/thread.h"
89 #include "event2/event-config.h"
90 #include <sys/cdefs.h>
91 __RCSID("$NetBSD: buffer.c,v 1.3 2015/01/29 07:26:02 spz Exp $");
92 #include "log-internal.h"
93 #include "mm-internal.h"
94 #include "util-internal.h"
95 #include "evthread-internal.h"
96 #include "evbuffer-internal.h"
97 #include "bufferevent-internal.h"
99 /* some systems do not have MAP_FAILED */
101 #define MAP_FAILED ((void *)-1)
104 /* send file support */
105 #if defined(_EVENT_HAVE_SYS_SENDFILE_H) && defined(_EVENT_HAVE_SENDFILE) && defined(__linux__)
106 #define USE_SENDFILE 1
107 #define SENDFILE_IS_LINUX 1
108 #elif defined(_EVENT_HAVE_SENDFILE) && defined(__FreeBSD__)
109 #define USE_SENDFILE 1
110 #define SENDFILE_IS_FREEBSD 1
111 #elif defined(_EVENT_HAVE_SENDFILE) && defined(__APPLE__)
112 #define USE_SENDFILE 1
113 #define SENDFILE_IS_MACOSX 1
114 #elif defined(_EVENT_HAVE_SENDFILE) && defined(__sun__) && defined(__svr4__)
115 #define USE_SENDFILE 1
116 #define SENDFILE_IS_SOLARIS 1
120 static int use_sendfile
= 1;
122 #ifdef _EVENT_HAVE_MMAP
123 static int use_mmap
= 1;
127 /* Mask of user-selectable callback flags. */
128 #define EVBUFFER_CB_USER_FLAGS 0xffff
129 /* Mask of all internal-use-only flags. */
130 #define EVBUFFER_CB_INTERNAL_FLAGS 0xffff0000
132 /* Flag set if the callback is using the cb_obsolete function pointer */
133 #define EVBUFFER_CB_OBSOLETE 0x00040000
135 /* evbuffer_chain support */
136 #define CHAIN_SPACE_PTR(ch) ((ch)->buffer + (ch)->misalign + (ch)->off)
137 #define CHAIN_SPACE_LEN(ch) ((ch)->flags & EVBUFFER_IMMUTABLE ? \
138 0 : (ch)->buffer_len - ((ch)->misalign + (ch)->off))
140 #define CHAIN_PINNED(ch) (((ch)->flags & EVBUFFER_MEM_PINNED_ANY) != 0)
141 #define CHAIN_PINNED_R(ch) (((ch)->flags & EVBUFFER_MEM_PINNED_R) != 0)
143 static void evbuffer_chain_align(struct evbuffer_chain
*chain
);
144 static int evbuffer_chain_should_realign(struct evbuffer_chain
*chain
,
146 static void evbuffer_deferred_callback(struct deferred_cb
*cb
, void *arg
);
147 static int evbuffer_ptr_memcmp(const struct evbuffer
*buf
,
148 const struct evbuffer_ptr
*pos
, const char *mem
, size_t len
);
149 static struct evbuffer_chain
*evbuffer_expand_singlechain(struct evbuffer
*buf
,
153 static int evbuffer_readfile(struct evbuffer
*buf
, evutil_socket_t fd
,
156 #define evbuffer_readfile evbuffer_read
159 static struct evbuffer_chain
*
160 evbuffer_chain_new(size_t size
)
162 struct evbuffer_chain
*chain
;
165 if (size
> EVBUFFER_CHAIN_MAX
- EVBUFFER_CHAIN_SIZE
)
168 size
+= EVBUFFER_CHAIN_SIZE
;
170 /* get the next largest memory that can hold the buffer */
171 if (size
< EVBUFFER_CHAIN_MAX
/ 2) {
172 to_alloc
= MIN_BUFFER_SIZE
;
173 while (to_alloc
< size
) {
180 /* we get everything in one chunk */
181 if ((chain
= mm_malloc(to_alloc
)) == NULL
)
184 memset(chain
, 0, EVBUFFER_CHAIN_SIZE
);
186 chain
->buffer_len
= to_alloc
- EVBUFFER_CHAIN_SIZE
;
188 /* this way we can manipulate the buffer to different addresses,
189 * which is required for mmap for example.
191 chain
->buffer
= EVBUFFER_CHAIN_EXTRA(u_char
, chain
);
197 evbuffer_chain_free(struct evbuffer_chain
*chain
)
199 if (CHAIN_PINNED(chain
)) {
200 chain
->flags
|= EVBUFFER_DANGLING
;
203 if (chain
->flags
& (EVBUFFER_MMAP
|EVBUFFER_SENDFILE
|
204 EVBUFFER_REFERENCE
)) {
205 if (chain
->flags
& EVBUFFER_REFERENCE
) {
206 struct evbuffer_chain_reference
*info
=
207 EVBUFFER_CHAIN_EXTRA(
208 struct evbuffer_chain_reference
,
211 (*info
->cleanupfn
)(chain
->buffer
,
215 #ifdef _EVENT_HAVE_MMAP
216 if (chain
->flags
& EVBUFFER_MMAP
) {
217 struct evbuffer_chain_fd
*info
=
218 EVBUFFER_CHAIN_EXTRA(struct evbuffer_chain_fd
,
220 if (munmap(chain
->buffer
, chain
->buffer_len
) == -1)
221 event_warn("%s: munmap failed", __func__
);
222 if (close(info
->fd
) == -1)
223 event_warn("%s: close(%d) failed",
228 if (chain
->flags
& EVBUFFER_SENDFILE
) {
229 struct evbuffer_chain_fd
*info
=
230 EVBUFFER_CHAIN_EXTRA(struct evbuffer_chain_fd
,
232 if (close(info
->fd
) == -1)
233 event_warn("%s: close(%d) failed",
243 evbuffer_free_all_chains(struct evbuffer_chain
*chain
)
245 struct evbuffer_chain
*next
;
246 for (; chain
; chain
= next
) {
248 evbuffer_chain_free(chain
);
254 evbuffer_chains_all_empty(struct evbuffer_chain
*chain
)
256 for (; chain
; chain
= chain
->next
) {
263 /* The definition is needed for EVUTIL_ASSERT, which uses sizeof to avoid
264 "unused variable" warnings. */
265 static inline int evbuffer_chains_all_empty(struct evbuffer_chain
*chain
) {
270 /* Free all trailing chains in 'buf' that are neither pinned nor empty, prior
271 * to replacing them all with a new chain. Return a pointer to the place
272 * where the new chain will go.
274 * Internal; requires lock. The caller must fix up buf->last and buf->first
275 * as needed; they might have been freed.
277 static struct evbuffer_chain
**
278 evbuffer_free_trailing_empty_chains(struct evbuffer
*buf
)
280 struct evbuffer_chain
**ch
= buf
->last_with_datap
;
281 /* Find the first victim chain. It might be *last_with_datap */
282 while ((*ch
) && ((*ch
)->off
!= 0 || CHAIN_PINNED(*ch
)))
285 EVUTIL_ASSERT(evbuffer_chains_all_empty(*ch
));
286 evbuffer_free_all_chains(*ch
);
292 /* Add a single chain 'chain' to the end of 'buf', freeing trailing empty
293 * chains as necessary. Requires lock. Does not schedule callbacks.
296 evbuffer_chain_insert(struct evbuffer
*buf
,
297 struct evbuffer_chain
*chain
)
299 ASSERT_EVBUFFER_LOCKED(buf
);
300 if (*buf
->last_with_datap
== NULL
) {
301 /* There are no chains data on the buffer at all. */
302 EVUTIL_ASSERT(buf
->last_with_datap
== &buf
->first
);
303 EVUTIL_ASSERT(buf
->first
== NULL
);
304 buf
->first
= buf
->last
= chain
;
306 struct evbuffer_chain
**ch
= buf
->last_with_datap
;
307 /* Find the first victim chain. It might be *last_with_datap */
308 while ((*ch
) && ((*ch
)->off
!= 0 || CHAIN_PINNED(*ch
)))
311 /* There is no victim; just append this new chain. */
312 buf
->last
->next
= chain
;
314 buf
->last_with_datap
= &buf
->last
->next
;
316 /* Replace all victim chains with this chain. */
317 EVUTIL_ASSERT(evbuffer_chains_all_empty(*ch
));
318 evbuffer_free_all_chains(*ch
);
323 buf
->total_len
+= chain
->off
;
326 static inline struct evbuffer_chain
*
327 evbuffer_chain_insert_new(struct evbuffer
*buf
, size_t datlen
)
329 struct evbuffer_chain
*chain
;
330 if ((chain
= evbuffer_chain_new(datlen
)) == NULL
)
332 evbuffer_chain_insert(buf
, chain
);
337 _evbuffer_chain_pin(struct evbuffer_chain
*chain
, unsigned flag
)
339 EVUTIL_ASSERT((chain
->flags
& flag
) == 0);
340 chain
->flags
|= flag
;
344 _evbuffer_chain_unpin(struct evbuffer_chain
*chain
, unsigned flag
)
346 EVUTIL_ASSERT((chain
->flags
& flag
) != 0);
347 chain
->flags
&= ~flag
;
348 if (chain
->flags
& EVBUFFER_DANGLING
)
349 evbuffer_chain_free(chain
);
355 struct evbuffer
*buffer
;
357 buffer
= mm_calloc(1, sizeof(struct evbuffer
));
361 TAILQ_INIT(&buffer
->callbacks
);
363 buffer
->last_with_datap
= &buffer
->first
;
369 evbuffer_set_flags(struct evbuffer
*buf
, ev_uint64_t flags
)
372 buf
->flags
|= (ev_uint32_t
)flags
;
373 EVBUFFER_UNLOCK(buf
);
378 evbuffer_clear_flags(struct evbuffer
*buf
, ev_uint64_t flags
)
381 buf
->flags
&= ~(ev_uint32_t
)flags
;
382 EVBUFFER_UNLOCK(buf
);
387 _evbuffer_incref(struct evbuffer
*buf
)
391 EVBUFFER_UNLOCK(buf
);
395 _evbuffer_incref_and_lock(struct evbuffer
*buf
)
402 evbuffer_defer_callbacks(struct evbuffer
*buffer
, struct event_base
*base
)
404 EVBUFFER_LOCK(buffer
);
405 buffer
->cb_queue
= event_base_get_deferred_cb_queue(base
);
406 buffer
->deferred_cbs
= 1;
407 event_deferred_cb_init(&buffer
->deferred
,
408 evbuffer_deferred_callback
, buffer
);
409 EVBUFFER_UNLOCK(buffer
);
414 evbuffer_enable_locking(struct evbuffer
*buf
, void *lock
)
416 #ifdef _EVENT_DISABLE_THREAD_SUPPORT
423 EVTHREAD_ALLOC_LOCK(lock
, EVTHREAD_LOCKTYPE_RECURSIVE
);
438 evbuffer_set_parent(struct evbuffer
*buf
, struct bufferevent
*bev
)
442 EVBUFFER_UNLOCK(buf
);
446 evbuffer_run_callbacks(struct evbuffer
*buffer
, int running_deferred
)
448 struct evbuffer_cb_entry
*cbent
, *next
;
449 struct evbuffer_cb_info info
;
451 ev_uint32_t mask
, masked_val
;
454 if (running_deferred
) {
455 mask
= EVBUFFER_CB_NODEFER
|EVBUFFER_CB_ENABLED
;
456 masked_val
= EVBUFFER_CB_ENABLED
;
457 } else if (buffer
->deferred_cbs
) {
458 mask
= EVBUFFER_CB_NODEFER
|EVBUFFER_CB_ENABLED
;
459 masked_val
= EVBUFFER_CB_NODEFER
|EVBUFFER_CB_ENABLED
;
460 /* Don't zero-out n_add/n_del, since the deferred callbacks
461 will want to see them. */
464 mask
= EVBUFFER_CB_ENABLED
;
465 masked_val
= EVBUFFER_CB_ENABLED
;
468 ASSERT_EVBUFFER_LOCKED(buffer
);
470 if (TAILQ_EMPTY(&buffer
->callbacks
)) {
471 buffer
->n_add_for_cb
= buffer
->n_del_for_cb
= 0;
474 if (buffer
->n_add_for_cb
== 0 && buffer
->n_del_for_cb
== 0)
477 new_size
= buffer
->total_len
;
478 info
.orig_size
= new_size
+ buffer
->n_del_for_cb
- buffer
->n_add_for_cb
;
479 info
.n_added
= buffer
->n_add_for_cb
;
480 info
.n_deleted
= buffer
->n_del_for_cb
;
482 buffer
->n_add_for_cb
= 0;
483 buffer
->n_del_for_cb
= 0;
485 for (cbent
= TAILQ_FIRST(&buffer
->callbacks
);
486 cbent
!= TAILQ_END(&buffer
->callbacks
);
488 /* Get the 'next' pointer now in case this callback decides
489 * to remove itself or something. */
490 next
= TAILQ_NEXT(cbent
, next
);
492 if ((cbent
->flags
& mask
) != masked_val
)
495 if ((cbent
->flags
& EVBUFFER_CB_OBSOLETE
))
496 cbent
->cb
.cb_obsolete(buffer
,
497 info
.orig_size
, new_size
, cbent
->cbarg
);
499 cbent
->cb
.cb_func(buffer
, &info
, cbent
->cbarg
);
504 evbuffer_invoke_callbacks(struct evbuffer
*buffer
)
506 if (TAILQ_EMPTY(&buffer
->callbacks
)) {
507 buffer
->n_add_for_cb
= buffer
->n_del_for_cb
= 0;
511 if (buffer
->deferred_cbs
) {
512 if (buffer
->deferred
.queued
)
514 _evbuffer_incref_and_lock(buffer
);
516 bufferevent_incref(buffer
->parent
);
517 EVBUFFER_UNLOCK(buffer
);
518 event_deferred_cb_schedule(buffer
->cb_queue
, &buffer
->deferred
);
521 evbuffer_run_callbacks(buffer
, 0);
525 evbuffer_deferred_callback(struct deferred_cb
*cb
, void *arg
)
527 struct bufferevent
*parent
= NULL
;
528 struct evbuffer
*buffer
= arg
;
530 /* XXXX It would be better to run these callbacks without holding the
532 EVBUFFER_LOCK(buffer
);
533 parent
= buffer
->parent
;
534 evbuffer_run_callbacks(buffer
, 1);
535 _evbuffer_decref_and_unlock(buffer
);
537 bufferevent_decref(parent
);
541 evbuffer_remove_all_callbacks(struct evbuffer
*buffer
)
543 struct evbuffer_cb_entry
*cbent
;
545 while ((cbent
= TAILQ_FIRST(&buffer
->callbacks
))) {
546 TAILQ_REMOVE(&buffer
->callbacks
, cbent
, next
);
552 _evbuffer_decref_and_unlock(struct evbuffer
*buffer
)
554 struct evbuffer_chain
*chain
, *next
;
555 ASSERT_EVBUFFER_LOCKED(buffer
);
557 EVUTIL_ASSERT(buffer
->refcnt
> 0);
559 if (--buffer
->refcnt
> 0) {
560 EVBUFFER_UNLOCK(buffer
);
564 for (chain
= buffer
->first
; chain
!= NULL
; chain
= next
) {
566 evbuffer_chain_free(chain
);
568 evbuffer_remove_all_callbacks(buffer
);
569 if (buffer
->deferred_cbs
)
570 event_deferred_cb_cancel(buffer
->cb_queue
, &buffer
->deferred
);
572 EVBUFFER_UNLOCK(buffer
);
573 if (buffer
->own_lock
)
574 EVTHREAD_FREE_LOCK(buffer
->lock
, EVTHREAD_LOCKTYPE_RECURSIVE
);
/* Public API: release one reference on 'buffer'.  Takes the lock and
 * delegates to _evbuffer_decref_and_unlock(), which frees the buffer
 * (chains, callbacks, lock) when the refcount reaches zero. */
void
evbuffer_free(struct evbuffer *buffer)
{
	EVBUFFER_LOCK(buffer);
	_evbuffer_decref_and_unlock(buffer);
}
/* Public API: acquire the buffer's (recursive) lock.  A no-op unless
 * locking was enabled via evbuffer_enable_locking(). */
void
evbuffer_lock(struct evbuffer *buf)
{
	EVBUFFER_LOCK(buf);
}
/* Public API: release the lock taken by evbuffer_lock(). */
void
evbuffer_unlock(struct evbuffer *buf)
{
	EVBUFFER_UNLOCK(buf);
}
598 evbuffer_get_length(const struct evbuffer
*buffer
)
602 EVBUFFER_LOCK(buffer
);
604 result
= (buffer
->total_len
);
606 EVBUFFER_UNLOCK(buffer
);
612 evbuffer_get_contiguous_space(const struct evbuffer
*buf
)
614 struct evbuffer_chain
*chain
;
619 result
= (chain
!= NULL
? chain
->off
: 0);
620 EVBUFFER_UNLOCK(buf
);
626 evbuffer_reserve_space(struct evbuffer
*buf
, ev_ssize_t size
,
627 struct evbuffer_iovec
*vec
, int n_vecs
)
629 struct evbuffer_chain
*chain
, **chainp
;
638 if ((chain
= evbuffer_expand_singlechain(buf
, size
)) == NULL
)
641 vec
[0].iov_base
= CHAIN_SPACE_PTR(chain
);
642 vec
[0].iov_len
= (size_t) CHAIN_SPACE_LEN(chain
);
643 EVUTIL_ASSERT(size
<0 || (size_t)vec
[0].iov_len
>= (size_t)size
);
646 if (_evbuffer_expand_fast(buf
, size
, n_vecs
)<0)
648 n
= _evbuffer_read_setup_vecs(buf
, size
, vec
, n_vecs
,
653 EVBUFFER_UNLOCK(buf
);
659 advance_last_with_data(struct evbuffer
*buf
)
662 ASSERT_EVBUFFER_LOCKED(buf
);
664 if (!*buf
->last_with_datap
)
667 while ((*buf
->last_with_datap
)->next
&& (*buf
->last_with_datap
)->next
->off
) {
668 buf
->last_with_datap
= &(*buf
->last_with_datap
)->next
;
675 evbuffer_commit_space(struct evbuffer
*buf
,
676 struct evbuffer_iovec
*vec
, int n_vecs
)
678 struct evbuffer_chain
*chain
, **firstchainp
, **chainp
;
690 } else if (n_vecs
== 1 &&
691 (buf
->last
&& vec
[0].iov_base
== (void*)CHAIN_SPACE_PTR(buf
->last
))) {
692 /* The user only got or used one chain; it might not
693 * be the first one with space in it. */
694 if ((size_t)vec
[0].iov_len
> (size_t)CHAIN_SPACE_LEN(buf
->last
))
696 buf
->last
->off
+= vec
[0].iov_len
;
697 added
= vec
[0].iov_len
;
699 advance_last_with_data(buf
);
703 /* Advance 'firstchain' to the first chain with space in it. */
704 firstchainp
= buf
->last_with_datap
;
707 if (CHAIN_SPACE_LEN(*firstchainp
) == 0) {
708 firstchainp
= &(*firstchainp
)->next
;
711 chain
= *firstchainp
;
712 /* pass 1: make sure that the pointers and lengths of vecs[] are in
713 * bounds before we try to commit anything. */
714 for (i
=0; i
<n_vecs
; ++i
) {
717 if (vec
[i
].iov_base
!= (void*)CHAIN_SPACE_PTR(chain
) ||
718 (size_t)vec
[i
].iov_len
> CHAIN_SPACE_LEN(chain
))
722 /* pass 2: actually adjust all the chains. */
723 chainp
= firstchainp
;
724 for (i
=0; i
<n_vecs
; ++i
) {
725 (*chainp
)->off
+= vec
[i
].iov_len
;
726 added
+= vec
[i
].iov_len
;
727 if (vec
[i
].iov_len
) {
728 buf
->last_with_datap
= chainp
;
730 chainp
= &(*chainp
)->next
;
734 buf
->total_len
+= added
;
735 buf
->n_add_for_cb
+= added
;
737 evbuffer_invoke_callbacks(buf
);
740 EVBUFFER_UNLOCK(buf
);
745 HAS_PINNED_R(struct evbuffer
*buf
)
747 return (buf
->last
&& CHAIN_PINNED_R(buf
->last
));
751 ZERO_CHAIN(struct evbuffer
*dst
)
753 ASSERT_EVBUFFER_LOCKED(dst
);
756 dst
->last_with_datap
= &(dst
)->first
;
760 /* Prepares the contents of src to be moved to another buffer by removing
761 * read-pinned chains. The first pinned chain is saved in first, and the
762 * last in last. If src has no read-pinned chains, first and last are set
765 PRESERVE_PINNED(struct evbuffer
*src
, struct evbuffer_chain
**first
,
766 struct evbuffer_chain
**last
)
768 struct evbuffer_chain
*chain
, **pinned
;
770 ASSERT_EVBUFFER_LOCKED(src
);
772 if (!HAS_PINNED_R(src
)) {
773 *first
= *last
= NULL
;
777 pinned
= src
->last_with_datap
;
778 if (!CHAIN_PINNED_R(*pinned
))
779 pinned
= &(*pinned
)->next
;
780 EVUTIL_ASSERT(CHAIN_PINNED_R(*pinned
));
781 chain
= *first
= *pinned
;
784 /* If there's data in the first pinned chain, we need to allocate
785 * a new chain and copy the data over. */
787 struct evbuffer_chain
*tmp
;
789 EVUTIL_ASSERT(pinned
== src
->last_with_datap
);
790 tmp
= evbuffer_chain_new(chain
->off
);
793 memcpy(tmp
->buffer
, chain
->buffer
+ chain
->misalign
,
795 tmp
->off
= chain
->off
;
796 *src
->last_with_datap
= tmp
;
798 chain
->misalign
+= chain
->off
;
801 src
->last
= *src
->last_with_datap
;
809 RESTORE_PINNED(struct evbuffer
*src
, struct evbuffer_chain
*pinned
,
810 struct evbuffer_chain
*last
)
812 ASSERT_EVBUFFER_LOCKED(src
);
821 src
->last_with_datap
= &src
->first
;
826 COPY_CHAIN(struct evbuffer
*dst
, struct evbuffer
*src
)
828 ASSERT_EVBUFFER_LOCKED(dst
);
829 ASSERT_EVBUFFER_LOCKED(src
);
830 dst
->first
= src
->first
;
831 if (src
->last_with_datap
== &src
->first
)
832 dst
->last_with_datap
= &dst
->first
;
834 dst
->last_with_datap
= src
->last_with_datap
;
835 dst
->last
= src
->last
;
836 dst
->total_len
= src
->total_len
;
840 APPEND_CHAIN(struct evbuffer
*dst
, struct evbuffer
*src
)
842 ASSERT_EVBUFFER_LOCKED(dst
);
843 ASSERT_EVBUFFER_LOCKED(src
);
844 dst
->last
->next
= src
->first
;
845 if (src
->last_with_datap
== &src
->first
)
846 dst
->last_with_datap
= &dst
->last
->next
;
848 dst
->last_with_datap
= src
->last_with_datap
;
849 dst
->last
= src
->last
;
850 dst
->total_len
+= src
->total_len
;
854 PREPEND_CHAIN(struct evbuffer
*dst
, struct evbuffer
*src
)
856 ASSERT_EVBUFFER_LOCKED(dst
);
857 ASSERT_EVBUFFER_LOCKED(src
);
858 src
->last
->next
= dst
->first
;
859 dst
->first
= src
->first
;
860 dst
->total_len
+= src
->total_len
;
861 if (*dst
->last_with_datap
== NULL
) {
862 if (src
->last_with_datap
== &(src
)->first
)
863 dst
->last_with_datap
= &dst
->first
;
865 dst
->last_with_datap
= src
->last_with_datap
;
866 } else if (dst
->last_with_datap
== &dst
->first
) {
867 dst
->last_with_datap
= &src
->last
->next
;
872 evbuffer_add_buffer(struct evbuffer
*outbuf
, struct evbuffer
*inbuf
)
874 struct evbuffer_chain
*pinned
, *last
;
875 size_t in_total_len
, out_total_len
;
878 EVBUFFER_LOCK2(inbuf
, outbuf
);
879 in_total_len
= inbuf
->total_len
;
880 out_total_len
= outbuf
->total_len
;
882 if (in_total_len
== 0 || outbuf
== inbuf
)
885 if (outbuf
->freeze_end
|| inbuf
->freeze_start
) {
890 if (PRESERVE_PINNED(inbuf
, &pinned
, &last
) < 0) {
895 if (out_total_len
== 0) {
896 /* There might be an empty chain at the start of outbuf; free
898 evbuffer_free_all_chains(outbuf
->first
);
899 COPY_CHAIN(outbuf
, inbuf
);
901 APPEND_CHAIN(outbuf
, inbuf
);
904 RESTORE_PINNED(inbuf
, pinned
, last
);
906 inbuf
->n_del_for_cb
+= in_total_len
;
907 outbuf
->n_add_for_cb
+= in_total_len
;
909 evbuffer_invoke_callbacks(inbuf
);
910 evbuffer_invoke_callbacks(outbuf
);
913 EVBUFFER_UNLOCK2(inbuf
, outbuf
);
918 evbuffer_prepend_buffer(struct evbuffer
*outbuf
, struct evbuffer
*inbuf
)
920 struct evbuffer_chain
*pinned
, *last
;
921 size_t in_total_len
, out_total_len
;
924 EVBUFFER_LOCK2(inbuf
, outbuf
);
926 in_total_len
= inbuf
->total_len
;
927 out_total_len
= outbuf
->total_len
;
929 if (!in_total_len
|| inbuf
== outbuf
)
932 if (outbuf
->freeze_start
|| inbuf
->freeze_start
) {
937 if (PRESERVE_PINNED(inbuf
, &pinned
, &last
) < 0) {
942 if (out_total_len
== 0) {
943 /* There might be an empty chain at the start of outbuf; free
945 evbuffer_free_all_chains(outbuf
->first
);
946 COPY_CHAIN(outbuf
, inbuf
);
948 PREPEND_CHAIN(outbuf
, inbuf
);
951 RESTORE_PINNED(inbuf
, pinned
, last
);
953 inbuf
->n_del_for_cb
+= in_total_len
;
954 outbuf
->n_add_for_cb
+= in_total_len
;
956 evbuffer_invoke_callbacks(inbuf
);
957 evbuffer_invoke_callbacks(outbuf
);
959 EVBUFFER_UNLOCK2(inbuf
, outbuf
);
964 evbuffer_drain(struct evbuffer
*buf
, size_t len
)
966 struct evbuffer_chain
*chain
, *next
;
967 size_t remaining
, old_len
;
971 old_len
= buf
->total_len
;
976 if (buf
->freeze_start
) {
981 if (len
>= old_len
&& !HAS_PINNED_R(buf
)) {
983 for (chain
= buf
->first
; chain
!= NULL
; chain
= next
) {
985 evbuffer_chain_free(chain
);
993 buf
->total_len
-= len
;
995 for (chain
= buf
->first
;
996 remaining
>= chain
->off
;
999 remaining
-= chain
->off
;
1001 if (chain
== *buf
->last_with_datap
) {
1002 buf
->last_with_datap
= &buf
->first
;
1004 if (&chain
->next
== buf
->last_with_datap
)
1005 buf
->last_with_datap
= &buf
->first
;
1007 if (CHAIN_PINNED_R(chain
)) {
1008 EVUTIL_ASSERT(remaining
== 0);
1009 chain
->misalign
+= chain
->off
;
1013 evbuffer_chain_free(chain
);
1018 EVUTIL_ASSERT(remaining
<= chain
->off
);
1019 chain
->misalign
+= remaining
;
1020 chain
->off
-= remaining
;
1024 buf
->n_del_for_cb
+= len
;
1025 /* Tell someone about changes in this buffer */
1026 evbuffer_invoke_callbacks(buf
);
1029 EVBUFFER_UNLOCK(buf
);
1033 /* Reads data from an event buffer and drains the bytes read */
1035 evbuffer_remove(struct evbuffer
*buf
, void *data_out
, size_t datlen
)
1039 n
= evbuffer_copyout(buf
, data_out
, datlen
);
1041 if (evbuffer_drain(buf
, n
)<0)
1044 EVBUFFER_UNLOCK(buf
);
1049 evbuffer_copyout(struct evbuffer
*buf
, void *data_out
, size_t datlen
)
1051 /*XXX fails badly on sendfile case. */
1052 struct evbuffer_chain
*chain
;
1053 char *data
= data_out
;
1055 ev_ssize_t result
= 0;
1061 if (datlen
>= buf
->total_len
)
1062 datlen
= buf
->total_len
;
1067 if (buf
->freeze_start
) {
1074 while (datlen
&& datlen
>= chain
->off
) {
1075 memcpy(data
, chain
->buffer
+ chain
->misalign
, chain
->off
);
1077 datlen
-= chain
->off
;
1079 chain
= chain
->next
;
1080 EVUTIL_ASSERT(chain
|| datlen
==0);
1084 EVUTIL_ASSERT(chain
);
1085 EVUTIL_ASSERT(datlen
<= chain
->off
);
1086 memcpy(data
, chain
->buffer
+ chain
->misalign
, datlen
);
1091 EVBUFFER_UNLOCK(buf
);
1095 /* reads data from the src buffer to the dst buffer, avoids memcpy as
1097 /* XXXX should return ev_ssize_t */
1099 evbuffer_remove_buffer(struct evbuffer
*src
, struct evbuffer
*dst
,
1102 /*XXX We should have an option to force this to be zero-copy.*/
1104 /*XXX can fail badly on sendfile case. */
1105 struct evbuffer_chain
*chain
, *previous
;
1109 EVBUFFER_LOCK2(src
, dst
);
1111 chain
= previous
= src
->first
;
1113 if (datlen
== 0 || dst
== src
) {
1118 if (dst
->freeze_end
|| src
->freeze_start
) {
1123 /* short-cut if there is no more data buffered */
1124 if (datlen
>= src
->total_len
) {
1125 datlen
= src
->total_len
;
1126 evbuffer_add_buffer(dst
, src
);
1127 result
= (int)datlen
; /*XXXX should return ev_ssize_t*/
1131 /* removes chains if possible */
1132 while (chain
->off
<= datlen
) {
1133 /* We can't remove the last with data from src unless we
1134 * remove all chains, in which case we would have done the if
1136 EVUTIL_ASSERT(chain
!= *src
->last_with_datap
);
1137 nread
+= chain
->off
;
1138 datlen
-= chain
->off
;
1140 if (src
->last_with_datap
== &chain
->next
)
1141 src
->last_with_datap
= &src
->first
;
1142 chain
= chain
->next
;
1146 /* we can remove the chain */
1147 struct evbuffer_chain
**chp
;
1148 chp
= evbuffer_free_trailing_empty_chains(dst
);
1150 if (dst
->first
== NULL
) {
1151 dst
->first
= src
->first
;
1155 dst
->last
= previous
;
1156 previous
->next
= NULL
;
1158 advance_last_with_data(dst
);
1160 dst
->total_len
+= nread
;
1161 dst
->n_add_for_cb
+= nread
;
1164 /* we know that there is more data in the src buffer than
1165 * we want to read, so we manually drain the chain */
1166 evbuffer_add(dst
, chain
->buffer
+ chain
->misalign
, datlen
);
1167 chain
->misalign
+= datlen
;
1168 chain
->off
-= datlen
;
1171 /* You might think we would want to increment dst->n_add_for_cb
1172 * here too. But evbuffer_add above already took care of that.
1174 src
->total_len
-= nread
;
1175 src
->n_del_for_cb
+= nread
;
1178 evbuffer_invoke_callbacks(dst
);
1179 evbuffer_invoke_callbacks(src
);
1181 result
= (int)nread
;/*XXXX should change return type */
1184 EVBUFFER_UNLOCK2(src
, dst
);
1189 evbuffer_pullup(struct evbuffer
*buf
, ev_ssize_t size
)
1191 struct evbuffer_chain
*chain
, *next
, *tmp
, *last_with_data
;
1192 unsigned char *buffer
, *result
= NULL
;
1193 ev_ssize_t remaining
;
1194 int removed_last_with_data
= 0;
1195 int removed_last_with_datap
= 0;
1202 size
= buf
->total_len
;
1203 /* if size > buf->total_len, we cannot guarantee to the user that she
1204 * is going to have a long enough buffer afterwards; so we return
1206 if (size
== 0 || (size_t)size
> buf
->total_len
)
1209 /* No need to pull up anything; the first size bytes are
1211 if (chain
->off
>= (size_t)size
) {
1212 result
= chain
->buffer
+ chain
->misalign
;
1216 /* Make sure that none of the chains we need to copy from is pinned. */
1217 remaining
= size
- chain
->off
;
1218 EVUTIL_ASSERT(remaining
>= 0);
1219 for (tmp
=chain
->next
; tmp
; tmp
=tmp
->next
) {
1220 if (CHAIN_PINNED(tmp
))
1222 if (tmp
->off
>= (size_t)remaining
)
1224 remaining
-= tmp
->off
;
1227 if (CHAIN_PINNED(chain
)) {
1228 size_t old_off
= chain
->off
;
1229 if (CHAIN_SPACE_LEN(chain
) < size
- chain
->off
) {
1230 /* not enough room at end of chunk. */
1233 buffer
= CHAIN_SPACE_PTR(chain
);
1237 chain
= chain
->next
;
1238 } else if (chain
->buffer_len
- chain
->misalign
>= (size_t)size
) {
1239 /* already have enough space in the first chain */
1240 size_t old_off
= chain
->off
;
1241 buffer
= chain
->buffer
+ chain
->misalign
+ chain
->off
;
1245 chain
= chain
->next
;
1247 if ((tmp
= evbuffer_chain_new(size
)) == NULL
) {
1248 event_warn("%s: out of memory", __func__
);
1251 buffer
= tmp
->buffer
;
1256 /* TODO(niels): deal with buffers that point to NULL like sendfile */
1258 /* Copy and free every chunk that will be entirely pulled into tmp */
1259 last_with_data
= *buf
->last_with_datap
;
1260 for (; chain
!= NULL
&& (size_t)size
>= chain
->off
; chain
= next
) {
1263 memcpy(buffer
, chain
->buffer
+ chain
->misalign
, chain
->off
);
1265 buffer
+= chain
->off
;
1266 if (chain
== last_with_data
)
1267 removed_last_with_data
= 1;
1268 if (&chain
->next
== buf
->last_with_datap
)
1269 removed_last_with_datap
= 1;
1271 evbuffer_chain_free(chain
);
1274 if (chain
!= NULL
) {
1275 memcpy(buffer
, chain
->buffer
+ chain
->misalign
, size
);
1276 chain
->misalign
+= size
;
1284 if (removed_last_with_data
) {
1285 buf
->last_with_datap
= &buf
->first
;
1286 } else if (removed_last_with_datap
) {
1287 if (buf
->first
->next
&& buf
->first
->next
->off
)
1288 buf
->last_with_datap
= &buf
->first
->next
;
1290 buf
->last_with_datap
= &buf
->first
;
1293 result
= (tmp
->buffer
+ tmp
->misalign
);
1296 EVBUFFER_UNLOCK(buf
);
1301 * Reads a line terminated by either '\r\n', '\n\r' or '\r' or '\n'.
1302 * The returned buffer needs to be freed by the called.
1305 evbuffer_readline(struct evbuffer
*buffer
)
1307 return evbuffer_readln(buffer
, NULL
, EVBUFFER_EOL_ANY
);
1310 static inline ev_ssize_t
1311 evbuffer_strchr(struct evbuffer_ptr
*it
, const char chr
)
1313 struct evbuffer_chain
*chain
= it
->_internal
.chain
;
1314 size_t i
= it
->_internal
.pos_in_chain
;
1315 while (chain
!= NULL
) {
1316 char *buffer
= (char *)chain
->buffer
+ chain
->misalign
;
1317 char *cp
= memchr(buffer
+i
, chr
, chain
->off
-i
);
1319 it
->_internal
.chain
= chain
;
1320 it
->_internal
.pos_in_chain
= cp
- buffer
;
1321 it
->pos
+= (cp
- buffer
- i
);
1324 it
->pos
+= chain
->off
- i
;
1326 chain
= chain
->next
;
1332 static inline char *
1333 find_eol_char(char *s
, size_t len
)
1335 #define CHUNK_SZ 128
1336 /* Lots of benchmarking found this approach to be faster in practice
1337 * than doing two memchrs over the whole buffer, doin a memchr on each
1338 * char of the buffer, or trying to emulate memchr by hand. */
1339 char *s_end
, *cr
, *lf
;
1342 size_t chunk
= (s
+ CHUNK_SZ
< s_end
) ? CHUNK_SZ
: (s_end
- s
);
1343 cr
= memchr(s
, '\r', chunk
);
1344 lf
= memchr(s
, '\n', chunk
);
1360 evbuffer_find_eol_char(struct evbuffer_ptr
*it
)
1362 struct evbuffer_chain
*chain
= it
->_internal
.chain
;
1363 size_t i
= it
->_internal
.pos_in_chain
;
1364 while (chain
!= NULL
) {
1365 char *buffer
= (char *)chain
->buffer
+ chain
->misalign
;
1366 char *cp
= find_eol_char(buffer
+i
, chain
->off
-i
);
1368 it
->_internal
.chain
= chain
;
1369 it
->_internal
.pos_in_chain
= cp
- buffer
;
1370 it
->pos
+= (cp
- buffer
) - i
;
1373 it
->pos
+= chain
->off
- i
;
1375 chain
= chain
->next
;
1383 struct evbuffer_ptr
*ptr
, const char *chrset
)
1386 struct evbuffer_chain
*chain
= ptr
->_internal
.chain
;
1387 size_t i
= ptr
->_internal
.pos_in_chain
;
1393 char *buffer
= (char *)chain
->buffer
+ chain
->misalign
;
1394 for (; i
< chain
->off
; ++i
) {
1395 const char *p
= chrset
;
1397 if (buffer
[i
] == *p
++)
1400 ptr
->_internal
.chain
= chain
;
1401 ptr
->_internal
.pos_in_chain
= i
;
1409 if (! chain
->next
) {
1410 ptr
->_internal
.chain
= chain
;
1411 ptr
->_internal
.pos_in_chain
= i
;
1416 chain
= chain
->next
;
1422 evbuffer_getchr(struct evbuffer_ptr
*it
)
1424 struct evbuffer_chain
*chain
= it
->_internal
.chain
;
1425 size_t off
= it
->_internal
.pos_in_chain
;
1427 return chain
->buffer
[chain
->misalign
+ off
];
1431 evbuffer_search_eol(struct evbuffer
*buffer
,
1432 struct evbuffer_ptr
*start
, size_t *eol_len_out
,
1433 enum evbuffer_eol_style eol_style
)
1435 struct evbuffer_ptr it
, it2
;
1436 size_t extra_drain
= 0;
1439 EVBUFFER_LOCK(buffer
);
1442 memcpy(&it
, start
, sizeof(it
));
1445 it
._internal
.chain
= buffer
->first
;
1446 it
._internal
.pos_in_chain
= 0;
1449 /* the eol_style determines our first stop character and how many
1450 * characters we are going to drain afterwards. */
1451 switch (eol_style
) {
1452 case EVBUFFER_EOL_ANY
:
1453 if (evbuffer_find_eol_char(&it
) < 0)
1455 memcpy(&it2
, &it
, sizeof(it
));
1456 extra_drain
= evbuffer_strspn(&it2
, "\r\n");
1458 case EVBUFFER_EOL_CRLF_STRICT
: {
1459 it
= evbuffer_search(buffer
, "\r\n", 2, &it
);
1465 case EVBUFFER_EOL_CRLF
:
1467 if (evbuffer_find_eol_char(&it
) < 0)
1469 if (evbuffer_getchr(&it
) == '\n') {
1472 } else if (!evbuffer_ptr_memcmp(
1473 buffer
, &it
, "\r\n", 2)) {
1477 if (evbuffer_ptr_set(buffer
, &it
, 1,
1478 EVBUFFER_PTR_ADD
)<0)
1483 case EVBUFFER_EOL_LF
:
1484 if (evbuffer_strchr(&it
, '\n') < 0)
1494 EVBUFFER_UNLOCK(buffer
);
1500 *eol_len_out
= extra_drain
;
1506 evbuffer_readln(struct evbuffer
*buffer
, size_t *n_read_out
,
1507 enum evbuffer_eol_style eol_style
)
1509 struct evbuffer_ptr it
;
1511 size_t n_to_copy
=0, extra_drain
=0;
1512 char *result
= NULL
;
1514 EVBUFFER_LOCK(buffer
);
1516 if (buffer
->freeze_start
) {
1520 it
= evbuffer_search_eol(buffer
, NULL
, &extra_drain
, eol_style
);
1525 if ((line
= mm_malloc(n_to_copy
+1)) == NULL
) {
1526 event_warn("%s: out of memory", __func__
);
1530 evbuffer_remove(buffer
, line
, n_to_copy
);
1531 line
[n_to_copy
] = '\0';
1533 evbuffer_drain(buffer
, extra_drain
);
1536 EVBUFFER_UNLOCK(buffer
);
1539 *n_read_out
= result
? n_to_copy
: 0;
1544 #define EVBUFFER_CHAIN_MAX_AUTO_SIZE 4096
/* Adds data to an event buffer */

/* Append 'datlen' bytes from 'data_in' to the end of 'buf'.
 * Returns 0 on success, -1 on failure (frozen end, total_len overflow,
 * or allocation failure).  On success the add callbacks are invoked. */
int
evbuffer_add(struct evbuffer *buf, const void *data_in, size_t datlen)
{
	struct evbuffer_chain *chain, *tmp;
	const unsigned char *data = data_in;
	size_t remain, to_alloc;
	int result = -1;

	EVBUFFER_LOCK(buf);

	/* Appending to a buffer whose end is frozen is an error. */
	if (buf->freeze_end) {
		goto done;
	}
	/* Prevent buf->total_len overflow */
	if (datlen > EV_SIZE_MAX - buf->total_len) {
		goto done;
	}

	chain = buf->last;

	/* If there are no chains allocated for this buffer, allocate one
	 * big enough to hold all the data. */
	if (chain == NULL) {
		chain = evbuffer_chain_new(datlen);
		if (!chain)
			goto done;
		evbuffer_chain_insert(buf, chain);
	}

	if ((chain->flags & EVBUFFER_IMMUTABLE) == 0) {
		/* Always true for mutable buffers */
		EVUTIL_ASSERT(chain->misalign >= 0 &&
		    (ev_uint64_t)chain->misalign <= EVBUFFER_CHAIN_MAX);
		remain = chain->buffer_len -
		    (size_t)chain->misalign - chain->off;
		if (remain >= datlen) {
			/* there's enough space to hold all the data in the
			 * current last chain */
			memcpy(chain->buffer + chain->misalign + chain->off,
			    data, datlen);
			chain->off += datlen;
			buf->total_len += datlen;
			buf->n_add_for_cb += datlen;
			goto out;
		} else if (!CHAIN_PINNED(chain) &&
		    evbuffer_chain_should_realign(chain, datlen)) {
			/* we can fit the data into the misalignment */
			evbuffer_chain_align(chain);

			memcpy(chain->buffer + chain->off, data, datlen);
			chain->off += datlen;
			buf->total_len += datlen;
			buf->n_add_for_cb += datlen;
			goto out;
		}
	} else {
		/* we cannot write any data to the last chain */
		remain = 0;
	}

	/* we need to add another chain */
	to_alloc = chain->buffer_len;
	/* Double the allocation while it stays small, to amortize the
	 * cost of repeated adds; never exceed the auto-growth cap. */
	if (to_alloc <= EVBUFFER_CHAIN_MAX_AUTO_SIZE/2)
		to_alloc <<= 1;
	if (datlen > to_alloc)
		to_alloc = datlen;
	tmp = evbuffer_chain_new(to_alloc);
	if (tmp == NULL)
		goto done;

	if (remain) {
		/* Fill the tail of the old last chain first. */
		memcpy(chain->buffer + chain->misalign + chain->off,
		    data, remain);
		chain->off += remain;
		buf->total_len += remain;
		buf->n_add_for_cb += remain;
	}

	data += remain;
	datlen -= remain;

	/* Whatever did not fit goes into the freshly allocated chain. */
	memcpy(tmp->buffer, data, datlen);
	tmp->off = datlen;
	evbuffer_chain_insert(buf, tmp);
	buf->n_add_for_cb += datlen;

out:
	evbuffer_invoke_callbacks(buf);
	result = 0;
done:
	EVBUFFER_UNLOCK(buf);
	return result;
}
1642 evbuffer_prepend(struct evbuffer
*buf
, const void *data
, size_t datlen
)
1644 struct evbuffer_chain
*chain
, *tmp
;
1649 if (buf
->freeze_start
) {
1652 if (datlen
> EV_SIZE_MAX
- buf
->total_len
) {
1658 if (chain
== NULL
) {
1659 chain
= evbuffer_chain_new(datlen
);
1662 evbuffer_chain_insert(buf
, chain
);
1665 /* we cannot touch immutable buffers */
1666 if ((chain
->flags
& EVBUFFER_IMMUTABLE
) == 0) {
1667 /* Always true for mutable buffers */
1668 EVUTIL_ASSERT(chain
->misalign
>= 0 &&
1669 (ev_uint64_t
)chain
->misalign
<= EVBUFFER_CHAIN_MAX
);
1671 /* If this chain is empty, we can treat it as
1672 * 'empty at the beginning' rather than 'empty at the end' */
1673 if (chain
->off
== 0)
1674 chain
->misalign
= chain
->buffer_len
;
1676 if ((size_t)chain
->misalign
>= datlen
) {
1677 /* we have enough space to fit everything */
1678 memcpy(chain
->buffer
+ chain
->misalign
- datlen
,
1680 chain
->off
+= datlen
;
1681 chain
->misalign
-= datlen
;
1682 buf
->total_len
+= datlen
;
1683 buf
->n_add_for_cb
+= datlen
;
1685 } else if (chain
->misalign
) {
1686 /* we can only fit some of the data. */
1687 memcpy(chain
->buffer
,
1688 (const char*)data
+ datlen
- chain
->misalign
,
1689 (size_t)chain
->misalign
);
1690 chain
->off
+= (size_t)chain
->misalign
;
1691 buf
->total_len
+= (size_t)chain
->misalign
;
1692 buf
->n_add_for_cb
+= (size_t)chain
->misalign
;
1693 datlen
-= (size_t)chain
->misalign
;
1694 chain
->misalign
= 0;
1698 /* we need to add another chain */
1699 if ((tmp
= evbuffer_chain_new(datlen
)) == NULL
)
1702 if (buf
->last_with_datap
== &buf
->first
)
1703 buf
->last_with_datap
= &tmp
->next
;
1708 EVUTIL_ASSERT(datlen
<= tmp
->buffer_len
);
1709 tmp
->misalign
= tmp
->buffer_len
- datlen
;
1711 memcpy(tmp
->buffer
+ tmp
->misalign
, data
, datlen
);
1712 buf
->total_len
+= datlen
;
1713 buf
->n_add_for_cb
+= (size_t)chain
->misalign
;
1716 evbuffer_invoke_callbacks(buf
);
1719 EVBUFFER_UNLOCK(buf
);
/** Helper: realigns the memory in chain->buffer so that misalign is 0. */
static void
evbuffer_chain_align(struct evbuffer_chain *chain)
{
	/* The caller must never ask us to move immutable or pinned memory. */
	EVUTIL_ASSERT(!(chain->flags & EVBUFFER_IMMUTABLE));
	EVUTIL_ASSERT(!(chain->flags & EVBUFFER_MEM_PINNED_ANY));
	/* Source and destination may overlap, hence memmove (not memcpy). */
	memmove(chain->buffer, chain->buffer + chain->misalign, chain->off);
	chain->misalign = 0;
}
1733 #define MAX_TO_COPY_IN_EXPAND 4096
1734 #define MAX_TO_REALIGN_IN_EXPAND 2048
/** Helper: return true iff we should realign chain to fit datalen bytes of
    data in it. */
static int
evbuffer_chain_should_realign(struct evbuffer_chain *chain,
    size_t datlen)
{
	/* Realign only when all three hold: the chain's total free space
	 * (misalign + tail) can absorb datlen; less than half the chain is
	 * in use, so we move the minority of bytes; and the bytes to move
	 * stay under MAX_TO_REALIGN_IN_EXPAND, keeping the memmove cheap. */
	return chain->buffer_len - chain->off >= datlen &&
	    (chain->off < chain->buffer_len / 2) &&
	    (chain->off <= MAX_TO_REALIGN_IN_EXPAND);
}
1747 /* Expands the available space in the event buffer to at least datlen, all in
1748 * a single chunk. Return that chunk. */
1749 static struct evbuffer_chain
*
1750 evbuffer_expand_singlechain(struct evbuffer
*buf
, size_t datlen
)
1752 struct evbuffer_chain
*chain
, **chainp
;
1753 struct evbuffer_chain
*result
= NULL
;
1754 ASSERT_EVBUFFER_LOCKED(buf
);
1756 chainp
= buf
->last_with_datap
;
1758 /* XXX If *chainp is no longer writeable, but has enough space in its
1759 * misalign, this might be a bad idea: we could still use *chainp, not
1760 * (*chainp)->next. */
1761 if (*chainp
&& CHAIN_SPACE_LEN(*chainp
) == 0)
1762 chainp
= &(*chainp
)->next
;
1764 /* 'chain' now points to the first chain with writable space (if any)
1765 * We will either use it, realign it, replace it, or resize it. */
1768 if (chain
== NULL
||
1769 (chain
->flags
& (EVBUFFER_IMMUTABLE
|EVBUFFER_MEM_PINNED_ANY
))) {
1770 /* We can't use the last_with_data chain at all. Just add a
1771 * new one that's big enough. */
1775 /* If we can fit all the data, then we don't have to do anything */
1776 if (CHAIN_SPACE_LEN(chain
) >= datlen
) {
1781 /* If the chain is completely empty, just replace it by adding a new
1783 if (chain
->off
== 0) {
1787 /* If the misalignment plus the remaining space fulfills our data
1788 * needs, we could just force an alignment to happen. Afterwards, we
1789 * have enough space. But only do this if we're saving a lot of space
1790 * and not moving too much data. Otherwise the space savings are
1791 * probably offset by the time lost in copying.
1793 if (evbuffer_chain_should_realign(chain
, datlen
)) {
1794 evbuffer_chain_align(chain
);
1799 /* At this point, we can either resize the last chunk with space in
1800 * it, use the next chunk after it, or If we add a new chunk, we waste
1801 * CHAIN_SPACE_LEN(chain) bytes in the former last chunk. If we
1802 * resize, we have to copy chain->off bytes.
1805 /* Would expanding this chunk be affordable and worthwhile? */
1806 if (CHAIN_SPACE_LEN(chain
) < chain
->buffer_len
/ 8 ||
1807 chain
->off
> MAX_TO_COPY_IN_EXPAND
||
1808 (datlen
< EVBUFFER_CHAIN_MAX
&&
1809 EVBUFFER_CHAIN_MAX
- datlen
>= chain
->off
)) {
1810 /* It's not worth resizing this chain. Can the next one be
1812 if (chain
->next
&& CHAIN_SPACE_LEN(chain
->next
) >= datlen
) {
1813 /* Yes, we can just use the next chain (which should
1815 result
= chain
->next
;
1818 /* No; append a new chain (which will free all
1819 * terminal empty chains.) */
1823 /* Okay, we're going to try to resize this chain: Not doing so
1824 * would waste at least 1/8 of its current allocation, and we
1825 * can do so without having to copy more than
1826 * MAX_TO_COPY_IN_EXPAND bytes. */
1827 /* figure out how much space we need */
1828 size_t length
= chain
->off
+ datlen
;
1829 struct evbuffer_chain
*tmp
= evbuffer_chain_new(length
);
1833 /* copy the data over that we had so far */
1834 tmp
->off
= chain
->off
;
1835 memcpy(tmp
->buffer
, chain
->buffer
+ chain
->misalign
,
1837 /* fix up the list */
1838 EVUTIL_ASSERT(*chainp
== chain
);
1839 result
= *chainp
= tmp
;
1841 if (buf
->last
== chain
)
1844 tmp
->next
= chain
->next
;
1845 evbuffer_chain_free(chain
);
1850 result
= evbuffer_chain_insert_new(buf
, datlen
);
1854 EVUTIL_ASSERT(result
);
1855 EVUTIL_ASSERT(CHAIN_SPACE_LEN(result
) >= datlen
);
1860 /* Make sure that datlen bytes are available for writing in the last n
1861 * chains. Never copies or moves data. */
1863 _evbuffer_expand_fast(struct evbuffer
*buf
, size_t datlen
, int n
)
1865 struct evbuffer_chain
*chain
= buf
->last
, *tmp
, *next
;
1869 ASSERT_EVBUFFER_LOCKED(buf
);
1870 EVUTIL_ASSERT(n
>= 2);
1872 if (chain
== NULL
|| (chain
->flags
& EVBUFFER_IMMUTABLE
)) {
1873 /* There is no last chunk, or we can't touch the last chunk.
1874 * Just add a new chunk. */
1875 chain
= evbuffer_chain_new(datlen
);
1879 evbuffer_chain_insert(buf
, chain
);
1883 used
= 0; /* number of chains we're using space in. */
1884 avail
= 0; /* how much space they have. */
1885 /* How many bytes can we stick at the end of buffer as it is? Iterate
1886 * over the chains at the end of the buffer, tring to see how much
1887 * space we have in the first n. */
1888 for (chain
= *buf
->last_with_datap
; chain
; chain
= chain
->next
) {
1890 size_t space
= (size_t) CHAIN_SPACE_LEN(chain
);
1891 EVUTIL_ASSERT(chain
== *buf
->last_with_datap
);
1897 /* No data in chain; realign it. */
1898 chain
->misalign
= 0;
1899 avail
+= chain
->buffer_len
;
1902 if (avail
>= datlen
) {
1903 /* There is already enough space. Just return */
1910 /* There wasn't enough space in the first n chains with space in
1911 * them. Either add a new chain with enough space, or replace all
1912 * empty chains with one that has enough space, depending on n. */
1914 /* The loop ran off the end of the chains before it hit n
1915 * chains; we can add another. */
1916 EVUTIL_ASSERT(chain
== NULL
);
1918 tmp
= evbuffer_chain_new(datlen
- avail
);
1922 buf
->last
->next
= tmp
;
1924 /* (we would only set last_with_data if we added the first
1925 * chain. But if the buffer had no chains, we would have
1926 * just allocated a new chain earlier) */
1929 /* Nuke _all_ the empty chains. */
1930 int rmv_all
= 0; /* True iff we removed last_with_data. */
1931 chain
= *buf
->last_with_datap
;
1933 EVUTIL_ASSERT(chain
== buf
->first
);
1937 /* can't overflow, since only mutable chains have
1938 * huge misaligns. */
1939 avail
= (size_t) CHAIN_SPACE_LEN(chain
);
1940 chain
= chain
->next
;
1944 for (; chain
; chain
= next
) {
1946 EVUTIL_ASSERT(chain
->off
== 0);
1947 evbuffer_chain_free(chain
);
1949 EVUTIL_ASSERT(datlen
>= avail
);
1950 tmp
= evbuffer_chain_new(datlen
- avail
);
1955 buf
->last
= *buf
->last_with_datap
;
1956 (*buf
->last_with_datap
)->next
= NULL
;
1962 buf
->first
= buf
->last
= tmp
;
1963 buf
->last_with_datap
= &buf
->first
;
1965 (*buf
->last_with_datap
)->next
= tmp
;
/* Public wrapper: make sure 'buf' can accept at least 'datlen' more bytes
 * in a single contiguous chain.  Returns 0 on success, -1 on failure. */
int
evbuffer_expand(struct evbuffer *buf, size_t datlen)
{
	struct evbuffer_chain *chain;

	EVBUFFER_LOCK(buf);
	/* All the real work happens in the single-chain expansion helper. */
	chain = evbuffer_expand_singlechain(buf, datlen);
	EVBUFFER_UNLOCK(buf);
	return chain ? 0 : -1;
}
1984 * Reads data from a file descriptor into a buffer.
1987 #if defined(_EVENT_HAVE_SYS_UIO_H) || defined(WIN32)
1988 #define USE_IOVEC_IMPL
1991 #ifdef USE_IOVEC_IMPL
1993 #ifdef _EVENT_HAVE_SYS_UIO_H
1994 /* number of iovec we use for writev, fragmentation is going to determine
1995 * how much we end up writing */
1997 #define DEFAULT_WRITE_IOVEC 128
1999 #if defined(UIO_MAXIOV) && UIO_MAXIOV < DEFAULT_WRITE_IOVEC
2000 #define NUM_WRITE_IOVEC UIO_MAXIOV
2001 #elif defined(IOV_MAX) && IOV_MAX < DEFAULT_WRITE_IOVEC
2002 #define NUM_WRITE_IOVEC IOV_MAX
2004 #define NUM_WRITE_IOVEC DEFAULT_WRITE_IOVEC
2007 #define IOV_TYPE struct iovec
2008 #define IOV_PTR_FIELD iov_base
2009 #define IOV_LEN_FIELD iov_len
2010 #define IOV_LEN_TYPE size_t
2012 #define NUM_WRITE_IOVEC 16
2013 #define IOV_TYPE WSABUF
2014 #define IOV_PTR_FIELD buf
2015 #define IOV_LEN_FIELD len
2016 #define IOV_LEN_TYPE unsigned long
2019 #define NUM_READ_IOVEC 4
2021 #define EVBUFFER_MAX_READ 4096
2023 /** Helper function to figure out which space to use for reading data into
2024 an evbuffer. Internal use only.
2026 @param buf The buffer to read into
2027 @param howmuch How much we want to read.
2028 @param vecs An array of two or more iovecs or WSABUFs.
2029 @param n_vecs_avail The length of vecs
2030 @param chainp A pointer to a variable to hold the first chain we're
2032 @param exact Boolean: if true, we do not provide more than 'howmuch'
2033 space in the vectors, even if more space is available.
2034 @return The number of buffers we're using.
2037 _evbuffer_read_setup_vecs(struct evbuffer
*buf
, ev_ssize_t howmuch
,
2038 struct evbuffer_iovec
*vecs
, int n_vecs_avail
,
2039 struct evbuffer_chain
***chainp
, int exact
)
2041 struct evbuffer_chain
*chain
;
2042 struct evbuffer_chain
**firstchainp
;
2045 ASSERT_EVBUFFER_LOCKED(buf
);
2051 /* Let firstchain be the first chain with any space on it */
2052 firstchainp
= buf
->last_with_datap
;
2053 if (CHAIN_SPACE_LEN(*firstchainp
) == 0) {
2054 firstchainp
= &(*firstchainp
)->next
;
2057 chain
= *firstchainp
;
2058 for (i
= 0; i
< n_vecs_avail
&& so_far
< (size_t)howmuch
; ++i
) {
2059 size_t avail
= (size_t) CHAIN_SPACE_LEN(chain
);
2060 if (avail
> (howmuch
- so_far
) && exact
)
2061 avail
= howmuch
- so_far
;
2062 vecs
[i
].iov_base
= CHAIN_SPACE_PTR(chain
);
2063 vecs
[i
].iov_len
= avail
;
2065 chain
= chain
->next
;
2068 *chainp
= firstchainp
;
/* Best-effort count of bytes waiting to be read on 'fd'.  Uses FIONREAD
 * where available; otherwise just reports the fixed read ceiling so the
 * caller sizes its buffers pessimistically. */
static int
get_n_bytes_readable_on_socket(evutil_socket_t fd)
{
#if defined(FIONREAD) && defined(WIN32)
	unsigned long lng = EVBUFFER_MAX_READ;
	if (ioctlsocket(fd, FIONREAD, &lng) < 0)
		return -1;
	/* Can overflow, but mostly harmlessly. XXXX */
	return (int)lng;
#elif defined(FIONREAD)
	int n = EVBUFFER_MAX_READ;
	if (ioctl(fd, FIONREAD, &n) < 0)
		return -1;
	return n;
#else
	return EVBUFFER_MAX_READ;
#endif
}
2091 /* TODO(niels): should this function return ev_ssize_t and take ev_ssize_t
2094 evbuffer_read(struct evbuffer
*buf
, evutil_socket_t fd
, int howmuch
)
2096 struct evbuffer_chain
**chainp
;
2100 #ifdef USE_IOVEC_IMPL
2101 int nvecs
, i
, remaining
;
2103 struct evbuffer_chain
*chain
;
2109 if (buf
->freeze_end
) {
2114 n
= get_n_bytes_readable_on_socket(fd
);
2115 if (n
<= 0 || n
> EVBUFFER_MAX_READ
)
2116 n
= EVBUFFER_MAX_READ
;
2117 if (howmuch
< 0 || howmuch
> n
)
2120 #ifdef USE_IOVEC_IMPL
2121 /* Since we can use iovecs, we're willing to use the last
2122 * NUM_READ_IOVEC chains. */
2123 if (_evbuffer_expand_fast(buf
, howmuch
, NUM_READ_IOVEC
) == -1) {
2127 IOV_TYPE vecs
[NUM_READ_IOVEC
];
2128 #ifdef _EVBUFFER_IOVEC_IS_NATIVE
2129 nvecs
= _evbuffer_read_setup_vecs(buf
, howmuch
, vecs
,
2130 NUM_READ_IOVEC
, &chainp
, 1);
2132 /* We aren't using the native struct iovec. Therefore,
2134 struct evbuffer_iovec ev_vecs
[NUM_READ_IOVEC
];
2135 nvecs
= _evbuffer_read_setup_vecs(buf
, howmuch
, ev_vecs
, 2,
2138 for (i
=0; i
< nvecs
; ++i
)
2139 WSABUF_FROM_EVBUFFER_IOV(&vecs
[i
], &ev_vecs
[i
]);
2146 if (WSARecv(fd
, vecs
, nvecs
, &bytesRead
, &flags
, NULL
, NULL
)) {
2147 /* The read failed. It might be a close,
2148 * or it might be an error. */
2149 if (WSAGetLastError() == WSAECONNABORTED
)
2157 n
= readv(fd
, vecs
, nvecs
);
2161 #else /*!USE_IOVEC_IMPL*/
2162 /* If we don't have FIONREAD, we might waste some space here */
2163 /* XXX we _will_ waste some space here if there is any space left
2164 * over on buf->last. */
2165 if ((chain
= evbuffer_expand_singlechain(buf
, howmuch
)) == NULL
) {
2170 /* We can append new data at this point */
2171 p
= chain
->buffer
+ chain
->misalign
+ chain
->off
;
2174 n
= read(fd
, p
, howmuch
);
2176 n
= recv(fd
, p
, howmuch
, 0);
2178 #endif /* USE_IOVEC_IMPL */
2189 #ifdef USE_IOVEC_IMPL
2191 for (i
=0; i
< nvecs
; ++i
) {
2192 /* can't overflow, since only mutable chains have
2193 * huge misaligns. */
2194 size_t space
= (size_t) CHAIN_SPACE_LEN(*chainp
);
2195 /* XXXX This is a kludge that can waste space in perverse
2197 if (space
> EVBUFFER_CHAIN_MAX
)
2198 space
= EVBUFFER_CHAIN_MAX
;
2199 if ((ev_ssize_t
)space
< remaining
) {
2200 (*chainp
)->off
+= space
;
2201 remaining
-= (int)space
;
2203 (*chainp
)->off
+= remaining
;
2204 buf
->last_with_datap
= chainp
;
2207 chainp
= &(*chainp
)->next
;
2211 advance_last_with_data(buf
);
2213 buf
->total_len
+= n
;
2214 buf
->n_add_for_cb
+= n
;
2216 /* Tell someone about changes in this buffer */
2217 evbuffer_invoke_callbacks(buf
);
2220 EVBUFFER_UNLOCK(buf
);
2226 evbuffer_readfile(struct evbuffer
*buf
, evutil_socket_t fd
, ev_ssize_t howmuch
)
2230 struct evbuffer_iovec v
[2];
2234 if (buf
->freeze_end
) {
2243 /* XXX we _will_ waste some space here if there is any space left
2244 * over on buf->last. */
2245 nchains
= evbuffer_reserve_space(buf
, howmuch
, v
, 2);
2246 if (nchains
< 1 || nchains
> 2) {
2250 n
= read((int)fd
, v
[0].iov_base
, (unsigned int)v
[0].iov_len
);
2255 v
[0].iov_len
= (IOV_LEN_TYPE
) n
; /* XXXX another problem with big n.*/
2257 n
= read((int)fd
, v
[1].iov_base
, (unsigned int)v
[1].iov_len
);
2259 result
= (unsigned long) v
[0].iov_len
;
2260 evbuffer_commit_space(buf
, v
, 1);
2265 evbuffer_commit_space(buf
, v
, nchains
);
2269 EVBUFFER_UNLOCK(buf
);
2274 #ifdef USE_IOVEC_IMPL
2276 evbuffer_write_iovec(struct evbuffer
*buffer
, evutil_socket_t fd
,
2279 IOV_TYPE iov
[NUM_WRITE_IOVEC
];
2280 struct evbuffer_chain
*chain
= buffer
->first
;
2286 ASSERT_EVBUFFER_LOCKED(buffer
);
2287 /* XXX make this top out at some maximal data length? if the
2288 * buffer has (say) 1MB in it, split over 128 chains, there's
2289 * no way it all gets written in one go. */
2290 while (chain
!= NULL
&& i
< NUM_WRITE_IOVEC
&& howmuch
) {
2292 /* we cannot write the file info via writev */
2293 if (chain
->flags
& EVBUFFER_SENDFILE
)
2296 iov
[i
].IOV_PTR_FIELD
= (void *) (chain
->buffer
+ chain
->misalign
);
2297 if ((size_t)howmuch
>= chain
->off
) {
2298 /* XXXcould be problematic when windows supports mmap*/
2299 iov
[i
++].IOV_LEN_FIELD
= (IOV_LEN_TYPE
)chain
->off
;
2300 howmuch
-= chain
->off
;
2302 /* XXXcould be problematic when windows supports mmap*/
2303 iov
[i
++].IOV_LEN_FIELD
= (IOV_LEN_TYPE
)howmuch
;
2306 chain
= chain
->next
;
2313 if (WSASend(fd
, iov
, i
, &bytesSent
, 0, NULL
, NULL
))
2319 n
= writev(fd
, iov
, i
);
2327 evbuffer_write_sendfile(struct evbuffer
*buffer
, evutil_socket_t fd
,
2330 struct evbuffer_chain
*chain
= buffer
->first
;
2331 struct evbuffer_chain_fd
*info
=
2332 EVBUFFER_CHAIN_EXTRA(struct evbuffer_chain_fd
, chain
);
2333 #if defined(SENDFILE_IS_MACOSX) || defined(SENDFILE_IS_FREEBSD)
2335 off_t len
= chain
->off
;
2336 #elif defined(SENDFILE_IS_LINUX) || defined(SENDFILE_IS_SOLARIS)
2338 off_t offset
= chain
->misalign
;
2341 ASSERT_EVBUFFER_LOCKED(buffer
);
2343 #if defined(SENDFILE_IS_MACOSX)
2344 res
= sendfile(info
->fd
, fd
, chain
->misalign
, &len
, NULL
, 0);
2345 if (res
== -1 && !EVUTIL_ERR_RW_RETRIABLE(errno
))
2349 #elif defined(SENDFILE_IS_FREEBSD)
2350 res
= sendfile(info
->fd
, fd
, chain
->misalign
, chain
->off
, NULL
, &len
, 0);
2351 if (res
== -1 && !EVUTIL_ERR_RW_RETRIABLE(errno
))
2355 #elif defined(SENDFILE_IS_LINUX)
2356 /* TODO(niels): implement splice */
2357 res
= sendfile(fd
, info
->fd
, &offset
, chain
->off
);
2358 if (res
== -1 && EVUTIL_ERR_RW_RETRIABLE(errno
)) {
2359 /* if this is EAGAIN or EINTR return 0; otherwise, -1 */
2363 #elif defined(SENDFILE_IS_SOLARIS)
2365 const off_t offset_orig
= offset
;
2366 res
= sendfile(fd
, info
->fd
, &offset
, chain
->off
);
2367 if (res
== -1 && EVUTIL_ERR_RW_RETRIABLE(errno
)) {
2368 if (offset
- offset_orig
)
2369 return offset
- offset_orig
;
2370 /* if this is EAGAIN or EINTR and no bytes were
2371 * written, return 0 */
2381 evbuffer_write_atmost(struct evbuffer
*buffer
, evutil_socket_t fd
,
2386 EVBUFFER_LOCK(buffer
);
2388 if (buffer
->freeze_start
) {
2392 if (howmuch
< 0 || (size_t)howmuch
> buffer
->total_len
)
2393 howmuch
= buffer
->total_len
;
2397 struct evbuffer_chain
*chain
= buffer
->first
;
2398 if (chain
!= NULL
&& (chain
->flags
& EVBUFFER_SENDFILE
))
2399 n
= evbuffer_write_sendfile(buffer
, fd
, howmuch
);
2402 #ifdef USE_IOVEC_IMPL
2403 n
= evbuffer_write_iovec(buffer
, fd
, howmuch
);
2404 #elif defined(WIN32)
2405 /* XXX(nickm) Don't disable this code until we know if
2406 * the WSARecv code above works. */
2407 void *p
= evbuffer_pullup(buffer
, howmuch
);
2408 EVUTIL_ASSERT(p
|| !howmuch
);
2409 n
= send(fd
, p
, howmuch
, 0);
2411 void *p
= evbuffer_pullup(buffer
, howmuch
);
2412 EVUTIL_ASSERT(p
|| !howmuch
);
2413 n
= write(fd
, p
, howmuch
);
2421 evbuffer_drain(buffer
, n
);
2424 EVBUFFER_UNLOCK(buffer
);
/* Write as much of 'buffer' as the OS will take to 'fd', draining what
 * was written.  Convenience wrapper around evbuffer_write_atmost. */
int
evbuffer_write(struct evbuffer *buffer, evutil_socket_t fd)
{
	/* -1 means "no byte limit": try to write the whole buffer. */
	return evbuffer_write_atmost(buffer, fd, -1);
}
/* Legacy search interface: locate the first occurrence of the 'len' bytes
 * at 'what' inside 'buffer' and return a linearized pointer to it, or
 * NULL if absent.  NOTE: linearizing via evbuffer_pullup may copy data. */
unsigned char *
evbuffer_find(struct evbuffer *buffer, const unsigned char *what, size_t len)
{
	unsigned char *search;
	struct evbuffer_ptr ptr;

	EVBUFFER_LOCK(buffer);

	ptr = evbuffer_search(buffer, (const char *)what, len, NULL);
	if (ptr.pos < 0) {
		/* Not found. */
		search = NULL;
	} else {
		/* Flatten everything up to and including the match so the
		 * caller gets one contiguous region to point into. */
		search = evbuffer_pullup(buffer, ptr.pos + len);
		if (search)
			search += ptr.pos;
	}
	EVBUFFER_UNLOCK(buffer);
	return search;
}
/* Position 'pos' within 'buf': EVBUFFER_PTR_SET moves it to absolute
 * offset 'position'; EVBUFFER_PTR_ADD advances it by 'position' bytes.
 * Returns 0 on success; -1 (with the chain cleared) if the resulting
 * offset falls outside the buffer. */
int
evbuffer_ptr_set(struct evbuffer *buf, struct evbuffer_ptr *pos,
    size_t position, enum evbuffer_ptr_how how)
{
	size_t left = position;
	struct evbuffer_chain *chain = NULL;

	EVBUFFER_LOCK(buf);

	switch (how) {
	case EVBUFFER_PTR_SET:
		/* Absolute: restart the walk from the first chain. */
		chain = buf->first;
		pos->pos = position;
		position = 0;
		break;
	case EVBUFFER_PTR_ADD:
		/* this avoids iterating over all previous chains if
		   we just want to advance the position */
		if (pos->pos < 0 || EV_SIZE_MAX - position < (size_t)pos->pos) {
			EVBUFFER_UNLOCK(buf);
			return -1;
		}
		chain = pos->_internal.chain;
		pos->pos += position;
		position = pos->_internal.pos_in_chain;
		break;
	}

	/* Walk forward chain by chain until 'left' bytes fit in 'chain'. */
	EVUTIL_ASSERT(EV_SIZE_MAX - left >= position);
	while (chain && position + left >= chain->off) {
		left -= chain->off - position;
		position = 0;
		chain = chain->next;
	}
	if (chain) {
		pos->_internal.chain = chain;
		pos->_internal.pos_in_chain = position + left;
	} else {
		/* Ran off the end: mark the pointer invalid. */
		pos->_internal.chain = NULL;
		pos->pos = -1;
	}

	EVBUFFER_UNLOCK(buf);

	return chain != NULL ? 0 : -1;
}
/**
   Compare the bytes in buf at position pos to the len bytes in mem.  Return
   less than 0, 0, or greater than 0 as memcmp.
 */
static int
evbuffer_ptr_memcmp(const struct evbuffer *buf, const struct evbuffer_ptr *pos,
    const char *mem, size_t len)
{
	struct evbuffer_chain *chain;
	size_t position;
	int r;

	ASSERT_EVBUFFER_LOCKED(buf);

	/* Reject invalid positions and ranges that would run past the end
	 * of the buffer (including size_t overflow of pos->pos + len). */
	if (pos->pos < 0 ||
	    EV_SIZE_MAX - len < (size_t)pos->pos ||
	    pos->pos + len > buf->total_len)
		return -1;

	chain = pos->_internal.chain;
	position = pos->_internal.pos_in_chain;
	/* Compare piecewise, one chain at a time. */
	while (len && chain) {
		size_t n_comparable;
		if (len + position > chain->off)
			n_comparable = chain->off - position;
		else
			n_comparable = len;
		r = memcmp(chain->buffer + chain->misalign + position, mem,
		    n_comparable);
		if (r)
			return r;
		mem += n_comparable;
		len -= n_comparable;
		/* Subsequent chains are compared from their beginning. */
		position = 0;
		chain = chain->next;
	}

	return 0;
}
/* Find the first occurrence of 'what' (length 'len') in 'buffer' starting
 * at 'start' (or at the beginning if start is NULL). */
struct evbuffer_ptr
evbuffer_search(struct evbuffer *buffer, const char *what, size_t len, const struct evbuffer_ptr *start)
{
	/* No end bound: search all the way to the end of the buffer. */
	return evbuffer_search_range(buffer, what, len, start, NULL);
}
2548 evbuffer_search_range(struct evbuffer
*buffer
, const char *what
, size_t len
, const struct evbuffer_ptr
*start
, const struct evbuffer_ptr
*end
)
2550 struct evbuffer_ptr pos
;
2551 struct evbuffer_chain
*chain
, *last_chain
= NULL
;
2552 const unsigned char *p
;
2555 EVBUFFER_LOCK(buffer
);
2558 memcpy(&pos
, start
, sizeof(pos
));
2559 chain
= pos
._internal
.chain
;
2562 chain
= pos
._internal
.chain
= buffer
->first
;
2563 pos
._internal
.pos_in_chain
= 0;
2567 last_chain
= end
->_internal
.chain
;
2569 if (!len
|| len
> EV_SSIZE_MAX
)
2575 const unsigned char *start_at
=
2576 chain
->buffer
+ chain
->misalign
+
2577 pos
._internal
.pos_in_chain
;
2578 p
= memchr(start_at
, first
,
2579 chain
->off
- pos
._internal
.pos_in_chain
);
2581 pos
.pos
+= p
- start_at
;
2582 pos
._internal
.pos_in_chain
+= p
- start_at
;
2583 if (!evbuffer_ptr_memcmp(buffer
, &pos
, what
, len
)) {
2584 if (end
&& pos
.pos
+ (ev_ssize_t
)len
> end
->pos
)
2590 ++pos
._internal
.pos_in_chain
;
2591 if (pos
._internal
.pos_in_chain
== chain
->off
) {
2592 chain
= pos
._internal
.chain
= chain
->next
;
2593 pos
._internal
.pos_in_chain
= 0;
2596 if (chain
== last_chain
)
2598 pos
.pos
+= chain
->off
- pos
._internal
.pos_in_chain
;
2599 chain
= pos
._internal
.chain
= chain
->next
;
2600 pos
._internal
.pos_in_chain
= 0;
2606 pos
._internal
.chain
= NULL
;
2608 EVBUFFER_UNLOCK(buffer
);
2613 evbuffer_peek(struct evbuffer
*buffer
, ev_ssize_t len
,
2614 struct evbuffer_ptr
*start_at
,
2615 struct evbuffer_iovec
*vec
, int n_vec
)
2617 struct evbuffer_chain
*chain
;
2619 ev_ssize_t len_so_far
= 0;
2621 EVBUFFER_LOCK(buffer
);
2624 chain
= start_at
->_internal
.chain
;
2625 len_so_far
= chain
->off
2626 - start_at
->_internal
.pos_in_chain
;
2629 vec
[0].iov_base
= chain
->buffer
+ chain
->misalign
2630 + start_at
->_internal
.pos_in_chain
;
2631 vec
[0].iov_len
= len_so_far
;
2633 chain
= chain
->next
;
2635 chain
= buffer
->first
;
2638 if (n_vec
== 0 && len
< 0) {
2639 /* If no vectors are provided and they asked for "everything",
2640 * pretend they asked for the actual available amount. */
2641 len
= buffer
->total_len
;
2643 len
-= start_at
->pos
;
2648 if (len
>= 0 && len_so_far
>= len
)
2651 vec
[idx
].iov_base
= chain
->buffer
+ chain
->misalign
;
2652 vec
[idx
].iov_len
= chain
->off
;
2657 len_so_far
+= chain
->off
;
2658 chain
= chain
->next
;
2661 EVBUFFER_UNLOCK(buffer
);
2668 evbuffer_add_vprintf(struct evbuffer
*buf
, const char *fmt
, va_list ap
)
2672 int sz
, result
= -1;
2674 struct evbuffer_chain
*chain
;
2679 if (buf
->freeze_end
) {
2683 /* make sure that at least some space is available */
2684 if ((chain
= evbuffer_expand_singlechain(buf
, 64)) == NULL
)
2689 size_t used
= chain
->misalign
+ chain
->off
;
2690 buffer
= (char *)chain
->buffer
+ chain
->misalign
+ chain
->off
;
2691 EVUTIL_ASSERT(chain
->buffer_len
>= used
);
2692 space
= chain
->buffer_len
- used
;
2694 buffer
= (char*) CHAIN_SPACE_PTR(chain
);
2695 space
= (size_t) CHAIN_SPACE_LEN(chain
);
2698 #define va_copy(dst, src) memcpy(&(dst), &(src), sizeof(va_list))
2702 sz
= evutil_vsnprintf(buffer
, space
, fmt
, aq
);
2708 if (INT_MAX
>= EVBUFFER_CHAIN_MAX
&&
2709 (size_t)sz
>= EVBUFFER_CHAIN_MAX
)
2711 if ((size_t)sz
< space
) {
2713 buf
->total_len
+= sz
;
2714 buf
->n_add_for_cb
+= sz
;
2716 advance_last_with_data(buf
);
2717 evbuffer_invoke_callbacks(buf
);
2721 if ((chain
= evbuffer_expand_singlechain(buf
, sz
+ 1)) == NULL
)
2727 EVBUFFER_UNLOCK(buf
);
/* Append a printf-style formatted string to 'buf'.  Returns the number of
 * bytes added, or -1 on failure (delegates to evbuffer_add_vprintf). */
int
evbuffer_add_printf(struct evbuffer *buf, const char *fmt, ...)
{
	int res = -1;
	va_list ap;

	va_start(ap, fmt);
	res = evbuffer_add_vprintf(buf, fmt, ap);
	va_end(ap);

	return (res);
}
/* Append 'datlen' bytes at 'data' to 'outbuf' *by reference* — no copy is
 * made.  'cleanupfn(data, datlen, extra)' is invoked when the buffer is
 * done with the memory.  The chain is marked immutable since we do not
 * own the bytes.  Returns 0 on success, -1 on failure. */
int
evbuffer_add_reference(struct evbuffer *outbuf,
    const void *data, size_t datlen,
    evbuffer_ref_cleanup_cb cleanupfn, void *extra)
{
	struct evbuffer_chain *chain;
	struct evbuffer_chain_reference *info;
	int result = -1;

	/* The chain's trailing extra space holds the cleanup bookkeeping. */
	chain = evbuffer_chain_new(sizeof(struct evbuffer_chain_reference));
	if (!chain)
		return (-1);
	chain->flags |= EVBUFFER_REFERENCE | EVBUFFER_IMMUTABLE;
	/* __UNCONST: the chain struct stores a non-const pointer, but the
	 * IMMUTABLE flag guarantees we never write through it. */
	chain->buffer = __UNCONST(data);
	chain->buffer_len = datlen;
	chain->off = datlen;

	info = EVBUFFER_CHAIN_EXTRA(struct evbuffer_chain_reference, chain);
	info->cleanupfn = cleanupfn;
	info->extra = extra;

	EVBUFFER_LOCK(outbuf);
	if (outbuf->freeze_end) {
		/* don't call chain_free; we do not want to actually invoke
		 * the cleanup function */
		mm_free(chain);
		goto done;
	}
	evbuffer_chain_insert(outbuf, chain);
	outbuf->n_add_for_cb += datlen;

	evbuffer_invoke_callbacks(outbuf);

	result = 0;
done:
	EVBUFFER_UNLOCK(outbuf);

	return result;
}
2784 /* TODO(niels): maybe we don't want to own the fd, however, in that
2785 * case, we should dup it - dup is cheap. Perhaps, we should use a
2788 /* TODO(niels): we may want to add to automagically convert to mmap, in
2789 * case evbuffer_remove() or evbuffer_pullup() are being used.
2792 evbuffer_add_file(struct evbuffer
*outbuf
, int fd
,
2793 ev_off_t offset
, ev_off_t length
)
2795 #if defined(USE_SENDFILE) || defined(_EVENT_HAVE_MMAP)
2796 struct evbuffer_chain
*chain
;
2797 struct evbuffer_chain_fd
*info
;
2799 #if defined(USE_SENDFILE)
2800 int sendfile_okay
= 1;
2804 if (offset
< 0 || length
< 0 ||
2805 ((ev_uint64_t
)length
> EVBUFFER_CHAIN_MAX
) ||
2806 (ev_uint64_t
)offset
> (ev_uint64_t
)(EVBUFFER_CHAIN_MAX
- length
))
2809 #if defined(USE_SENDFILE)
2811 EVBUFFER_LOCK(outbuf
);
2812 sendfile_okay
= outbuf
->flags
& EVBUFFER_FLAG_DRAINS_TO_FD
;
2813 EVBUFFER_UNLOCK(outbuf
);
2816 if (use_sendfile
&& sendfile_okay
) {
2817 chain
= evbuffer_chain_new(sizeof(struct evbuffer_chain_fd
));
2818 if (chain
== NULL
) {
2819 event_warn("%s: out of memory", __func__
);
2823 chain
->flags
|= EVBUFFER_SENDFILE
| EVBUFFER_IMMUTABLE
;
2824 chain
->buffer
= NULL
; /* no reading possible */
2825 chain
->buffer_len
= length
+ offset
;
2826 chain
->off
= length
;
2827 chain
->misalign
= offset
;
2829 info
= EVBUFFER_CHAIN_EXTRA(struct evbuffer_chain_fd
, chain
);
2832 EVBUFFER_LOCK(outbuf
);
2833 if (outbuf
->freeze_end
) {
2837 outbuf
->n_add_for_cb
+= length
;
2838 evbuffer_chain_insert(outbuf
, chain
);
2842 #if defined(_EVENT_HAVE_MMAP)
2844 void *mapped
= mmap(NULL
, length
+ offset
, PROT_READ
,
2853 /* some mmap implementations require offset to be a multiple of
2854 * the page size. most users of this api, are likely to use 0
2855 * so mapping everything is not likely to be a problem.
2856 * TODO(niels): determine page size and round offset to that
2857 * page size to avoid mapping too much memory.
2859 if (mapped
== MAP_FAILED
) {
2860 event_warn("%s: mmap(%d, %d, %zu) failed",
2861 __func__
, fd
, 0, (size_t)(offset
+ length
));
2864 chain
= evbuffer_chain_new(sizeof(struct evbuffer_chain_fd
));
2865 if (chain
== NULL
) {
2866 event_warn("%s: out of memory", __func__
);
2867 munmap(mapped
, length
);
2871 chain
->flags
|= EVBUFFER_MMAP
| EVBUFFER_IMMUTABLE
;
2872 chain
->buffer
= mapped
;
2873 chain
->buffer_len
= length
+ offset
;
2874 chain
->off
= length
+ offset
;
2876 info
= EVBUFFER_CHAIN_EXTRA(struct evbuffer_chain_fd
, chain
);
2879 EVBUFFER_LOCK(outbuf
);
2880 if (outbuf
->freeze_end
) {
2882 evbuffer_chain_free(chain
);
2885 outbuf
->n_add_for_cb
+= length
;
2887 evbuffer_chain_insert(outbuf
, chain
);
2889 /* we need to subtract whatever we don't need */
2890 evbuffer_drain(outbuf
, offset
);
2895 /* the default implementation */
2896 struct evbuffer
*tmp
= evbuffer_new();
2903 #define lseek _lseeki64
2905 if (lseek(fd
, offset
, SEEK_SET
) == -1) {
2910 /* we add everything to a temporary buffer, so that we
2911 * can abort without side effects if the read fails.
2914 ev_ssize_t to_read
= length
> EV_SSIZE_MAX
? EV_SSIZE_MAX
: (ev_ssize_t
)length
;
2915 read
= evbuffer_readfile(tmp
, fd
, to_read
);
2924 EVBUFFER_LOCK(outbuf
);
2925 if (outbuf
->freeze_end
) {
2929 evbuffer_add_buffer(outbuf
, tmp
);
2933 #define close _close
2940 evbuffer_invoke_callbacks(outbuf
);
2941 EVBUFFER_UNLOCK(outbuf
);
/* Deprecated single-callback interface: drop every existing callback on
 * 'buffer' and, if 'cb' is non-NULL, install it as an "obsolete"-style
 * callback entry with argument 'cbarg'. */
void
evbuffer_setcb(struct evbuffer *buffer, evbuffer_cb cb, void *cbarg)
{
	EVBUFFER_LOCK(buffer);

	/* The old API allows at most one callback: clear any others. */
	if (!TAILQ_EMPTY(&buffer->callbacks))
		evbuffer_remove_all_callbacks(buffer);

	if (cb) {
		struct evbuffer_cb_entry *ent =
		    evbuffer_add_cb(buffer, NULL, cbarg);
		/* Store through the obsolete-callback union member and flag
		 * the entry so the dispatcher uses the old calling style. */
		ent->cb.cb_obsolete = cb;
		ent->flags |= EVBUFFER_CB_OBSOLETE;
	}
	EVBUFFER_UNLOCK(buffer);
}
/* Register callback 'cb' with argument 'cbarg' on 'buffer'.  The new
 * entry is created enabled and pushed onto the front of the callback
 * list.  Returns the entry (for later removal/flag changes) or NULL on
 * allocation failure. */
struct evbuffer_cb_entry *
evbuffer_add_cb(struct evbuffer *buffer, evbuffer_cb_func cb, void *cbarg)
{
	struct evbuffer_cb_entry *e;
	if (! (e = mm_calloc(1, sizeof(struct evbuffer_cb_entry))))
		return NULL;
	EVBUFFER_LOCK(buffer);
	e->cb.cb_func = cb;
	e->cbarg = cbarg;
	e->flags = EVBUFFER_CB_ENABLED;
	TAILQ_INSERT_HEAD(&buffer->callbacks, e, next);
	EVBUFFER_UNLOCK(buffer);
	return e;
}
/* Unlink callback entry 'ent' from 'buffer' and free it.  'ent' must
 * have been returned by evbuffer_add_cb on this same buffer. */
int
evbuffer_remove_cb_entry(struct evbuffer *buffer,
			 struct evbuffer_cb_entry *ent)
{
	EVBUFFER_LOCK(buffer);
	TAILQ_REMOVE(&buffer->callbacks, ent, next);
	EVBUFFER_UNLOCK(buffer);
	/* Freed outside the lock; the entry is already unreachable. */
	mm_free(ent);
	return 0;
}
2991 evbuffer_remove_cb(struct evbuffer
*buffer
, evbuffer_cb_func cb
, void *cbarg
)
2993 struct evbuffer_cb_entry
*cbent
;
2995 EVBUFFER_LOCK(buffer
);
2996 TAILQ_FOREACH(cbent
, &buffer
->callbacks
, next
) {
2997 if (cb
== cbent
->cb
.cb_func
&& cbarg
== cbent
->cbarg
) {
2998 result
= evbuffer_remove_cb_entry(buffer
, cbent
);
3003 EVBUFFER_UNLOCK(buffer
);
3008 evbuffer_cb_set_flags(struct evbuffer
*buffer
,
3009 struct evbuffer_cb_entry
*cb
, ev_uint32_t flags
)
3011 /* the user isn't allowed to mess with these. */
3012 flags
&= ~EVBUFFER_CB_INTERNAL_FLAGS
;
3013 EVBUFFER_LOCK(buffer
);
3015 EVBUFFER_UNLOCK(buffer
);
3020 evbuffer_cb_clear_flags(struct evbuffer
*buffer
,
3021 struct evbuffer_cb_entry
*cb
, ev_uint32_t flags
)
3023 /* the user isn't allowed to mess with these. */
3024 flags
&= ~EVBUFFER_CB_INTERNAL_FLAGS
;
3025 EVBUFFER_LOCK(buffer
);
3026 cb
->flags
&= ~flags
;
3027 EVBUFFER_UNLOCK(buffer
);
3032 evbuffer_freeze(struct evbuffer
*buffer
, int start
)
3034 EVBUFFER_LOCK(buffer
);
3036 buffer
->freeze_start
= 1;
3038 buffer
->freeze_end
= 1;
3039 EVBUFFER_UNLOCK(buffer
);
3044 evbuffer_unfreeze(struct evbuffer
*buffer
, int start
)
3046 EVBUFFER_LOCK(buffer
);
3048 buffer
->freeze_start
= 0;
3050 buffer
->freeze_end
= 0;
3051 EVBUFFER_UNLOCK(buffer
);
3057 evbuffer_cb_suspend(struct evbuffer
*buffer
, struct evbuffer_cb_entry
*cb
)
3059 if (!(cb
->flags
& EVBUFFER_CB_SUSPENDED
)) {
3060 cb
->size_before_suspend
= evbuffer_get_length(buffer
);
3061 cb
->flags
|= EVBUFFER_CB_SUSPENDED
;
3066 evbuffer_cb_unsuspend(struct evbuffer
*buffer
, struct evbuffer_cb_entry
*cb
)
3068 if ((cb
->flags
& EVBUFFER_CB_SUSPENDED
)) {
3069 unsigned call
= (cb
->flags
& EVBUFFER_CB_CALL_ON_UNSUSPEND
);
3070 size_t sz
= cb
->size_before_suspend
;
3071 cb
->flags
&= ~(EVBUFFER_CB_SUSPENDED
|
3072 EVBUFFER_CB_CALL_ON_UNSUSPEND
);
3073 cb
->size_before_suspend
= 0;
3074 if (call
&& (cb
->flags
& EVBUFFER_CB_ENABLED
)) {
3075 cb
->cb(buffer
, sz
, evbuffer_get_length(buffer
), cb
->cbarg
);
3081 /* These hooks are exposed so that the unit tests can temporarily disable
3082 * sendfile support in order to test mmap, or both to test linear
3083 * access. Don't use it; if we need to add a way to disable sendfile support
3084 * in the future, it will probably be via an alternate version of
3085 * evbuffer_add_file() with a 'flags' argument.
3087 int _evbuffer_testing_use_sendfile(void);
3088 int _evbuffer_testing_use_mmap(void);
3089 int _evbuffer_testing_use_linear_file_access(void);
3092 _evbuffer_testing_use_sendfile(void)
3099 #ifdef _EVENT_HAVE_MMAP
3105 _evbuffer_testing_use_mmap(void)
3111 #ifdef _EVENT_HAVE_MMAP
3118 _evbuffer_testing_use_linear_file_access(void)
3123 #ifdef _EVENT_HAVE_MMAP