/* $NetBSD: bufferevent_ratelim.c,v 1.2 2013/04/11 16:56:41 christos Exp $ */
/*
 * Copyright (c) 2007-2012 Niels Provos and Nick Mathewson
 * Copyright (c) 2002-2006 Niels Provos <provos@citi.umich.edu>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <sys/types.h>
#include <limits.h>
#include <string.h>
#include <stdlib.h>

#include "event2/event.h"
#include "event2/event_struct.h"
#include "event2/util.h"
#include "event2/bufferevent.h"
#include "event2/bufferevent_struct.h"
#include "event2/buffer.h"

#include "ratelim-internal.h"

#include "bufferevent-internal.h"
#include "mm-internal.h"
#include "util-internal.h"
#include "event-internal.h"
void
ev_token_bucket_init(struct ev_token_bucket *bucket,
    const struct ev_token_bucket_cfg *cfg,
    ev_uint32_t current_tick,
    int reinitialize)
{
	if (reinitialize) {
		/* on reinitialization, we only clip downwards, since we've
		   already used who-knows-how-much bandwidth this tick.  We
		   leave "last_updated" as it is; the next update will add the
		   appropriate amount of bandwidth to the bucket.
		*/
		if (bucket->read_limit > (ev_int64_t) cfg->read_maximum)
			bucket->read_limit = cfg->read_maximum;
		if (bucket->write_limit > (ev_int64_t) cfg->write_maximum)
			bucket->write_limit = cfg->write_maximum;
	} else {
		bucket->read_limit = cfg->read_rate;
		bucket->write_limit = cfg->write_rate;
		bucket->last_updated = current_tick;
	}
}
int
ev_token_bucket_update(struct ev_token_bucket *bucket,
    const struct ev_token_bucket_cfg *cfg,
    ev_uint32_t current_tick)
{
	/* It's okay if the tick number overflows, since we'll just
	 * wrap around when we do the unsigned subtraction. */
	unsigned n_ticks = current_tick - bucket->last_updated;

	/* Make sure some ticks actually happened, and that time didn't
	 * move backwards. */
	if (n_ticks == 0 || n_ticks > INT_MAX)
		return 0;

	/* Naively, we would say
		bucket->limit += n_ticks * cfg->rate;

		if (bucket->limit > cfg->maximum)
			bucket->limit = cfg->maximum;

	   But we're worried about overflow, so we do it like this:
	*/
	if ((cfg->read_maximum - bucket->read_limit) / n_ticks < cfg->read_rate)
		bucket->read_limit = cfg->read_maximum;
	else
		bucket->read_limit += n_ticks * cfg->read_rate;

	if ((cfg->write_maximum - bucket->write_limit) / n_ticks < cfg->write_rate)
		bucket->write_limit = cfg->write_maximum;
	else
		bucket->write_limit += n_ticks * cfg->write_rate;

	bucket->last_updated = current_tick;

	return 1;
}
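
/* Worked example of the overflow guard above (illustrative numbers, not
 * from the original source): with read_maximum = 100 tokens,
 * read_limit = 40, n_ticks = 3, and read_rate = 30 tokens/tick, the naive
 * refill 40 + 3*30 = 130 would overshoot the burst cap.  The guard sees
 * that (100 - 40) / 3 = 20 < 30 and clips the bucket to read_maximum,
 * without ever computing n_ticks * rate, which could overflow for a very
 * large n_ticks. */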
static void
bufferevent_update_buckets(struct bufferevent_private *bev)
{
	/* Must hold lock on bev. */
	struct timeval now;
	unsigned tick;
	event_base_gettimeofday_cached(bev->bev.ev_base, &now);
	tick = ev_token_bucket_get_tick(&now, bev->rate_limiting->cfg);
	if (tick != bev->rate_limiting->limit.last_updated)
		ev_token_bucket_update(&bev->rate_limiting->limit,
		    bev->rate_limiting->cfg, tick);
}
ev_uint32_t
ev_token_bucket_get_tick(const struct timeval *tv,
    const struct ev_token_bucket_cfg *cfg)
{
	/* This computation uses two multiplies and a divide.  We could do
	 * fewer if we knew that the tick length was an integer number of
	 * seconds, or if we knew it divided evenly into a second.  We should
	 * investigate that more.
	 */

	/* We cast to an ev_uint64_t first, since we don't want to overflow
	 * before we do the final divide. */
	ev_uint64_t msec = (ev_uint64_t)tv->tv_sec * 1000 + tv->tv_usec / 1000;
	return (unsigned)(msec / cfg->msec_per_tick);
}
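
/* Example of the tick arithmetic above (illustrative values): with
 * msec_per_tick = 100 and tv = { .tv_sec = 2, .tv_usec = 530000 },
 * msec = 2*1000 + 530000/1000 = 2530, so the tick number is
 * 2530 / 100 = 25. */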
struct ev_token_bucket_cfg *
ev_token_bucket_cfg_new(size_t read_rate, size_t read_burst,
    size_t write_rate, size_t write_burst,
    const struct timeval *tick_len)
{
	struct ev_token_bucket_cfg *r;
	struct timeval g;
	if (! tick_len) {
		g.tv_sec = 1;
		g.tv_usec = 0;
		tick_len = &g;
	}
	if (read_rate > read_burst || write_rate > write_burst ||
	    read_rate < 1 || write_rate < 1)
		return NULL;
	if (read_rate > EV_RATE_LIMIT_MAX ||
	    write_rate > EV_RATE_LIMIT_MAX ||
	    read_burst > EV_RATE_LIMIT_MAX ||
	    write_burst > EV_RATE_LIMIT_MAX)
		return NULL;
	r = mm_calloc(1, sizeof(struct ev_token_bucket_cfg));
	if (!r)
		return NULL;
	r->read_rate = read_rate;
	r->write_rate = write_rate;
	r->read_maximum = read_burst;
	r->write_maximum = write_burst;
	memcpy(&r->tick_timeout, tick_len, sizeof(struct timeval));
	r->msec_per_tick = (tick_len->tv_sec * 1000) +
	    (tick_len->tv_usec & COMMON_TIMEOUT_MICROSECONDS_MASK)/1000;
	return r;
}
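
/* A minimal usage sketch (hypothetical values, not part of this file):
 *
 *	struct timeval tick = { 0, 500000 };		// half-second tick
 *	struct ev_token_bucket_cfg *cfg =
 *	    ev_token_bucket_cfg_new(25600, 51200,	// read rate/burst
 *		25600, 51200,				// write rate/burst
 *		&tick);
 *	if (cfg)
 *		bufferevent_set_rate_limit(bev, cfg);
 *
 * Rates and bursts are in bytes per tick, so a half-second tick with a
 * rate of 25600 allows roughly 51200 bytes per second sustained. */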
void
ev_token_bucket_cfg_free(struct ev_token_bucket_cfg *cfg)
{
	mm_free(cfg);
}
/* No matter how big our bucket gets, don't try to read more than this
 * much in a single read operation. */
#define MAX_TO_READ_EVER 16384
/* No matter how big our bucket gets, don't try to write more than this
 * much in a single write operation. */
#define MAX_TO_WRITE_EVER 16384

#define LOCK_GROUP(g)	EVLOCK_LOCK((g)->lock, 0)
#define UNLOCK_GROUP(g)	EVLOCK_UNLOCK((g)->lock, 0)

static int _bev_group_suspend_reading(struct bufferevent_rate_limit_group *g);
static int _bev_group_suspend_writing(struct bufferevent_rate_limit_group *g);
static void _bev_group_unsuspend_reading(struct bufferevent_rate_limit_group *g);
static void _bev_group_unsuspend_writing(struct bufferevent_rate_limit_group *g);
/** Helper: figure out the maximum amount we should write if is_write, or
    the maximum amount we should read if is_read.  Return that maximum, or
    0 if our bucket is wholly exhausted.
 */
static inline ev_ssize_t
_bufferevent_get_rlim_max(struct bufferevent_private *bev, int is_write)
{
	/* needs lock on bev. */
	ev_ssize_t max_so_far = is_write ? MAX_TO_WRITE_EVER : MAX_TO_READ_EVER;

#define LIM(x)						\
	(is_write ? (x).write_limit : (x).read_limit)

#define GROUP_SUSPENDED(g)				\
	(is_write ? (g)->write_suspended : (g)->read_suspended)

	/* Sets max_so_far to MIN(x, max_so_far) */
#define CLAMPTO(x)					\
	do {						\
		if (max_so_far > (x))			\
			max_so_far = (x);		\
	} while (/*CONSTCOND*/0);

	if (!bev->rate_limiting)
		return max_so_far;

	/* If rate-limiting is enabled at all, update the appropriate
	   bucket, and take the smaller of our rate limit and the group
	   rate limit.
	 */
	if (bev->rate_limiting->cfg) {
		bufferevent_update_buckets(bev);
		max_so_far = LIM(bev->rate_limiting->limit);
	}
	if (bev->rate_limiting->group) {
		struct bufferevent_rate_limit_group *g =
		    bev->rate_limiting->group;
		ev_ssize_t share;
		LOCK_GROUP(g);
		if (GROUP_SUSPENDED(g)) {
			/* We can get here if we failed to lock this
			 * particular bufferevent while suspending the whole
			 * group. */
			if (is_write)
				bufferevent_suspend_write(&bev->bev,
				    BEV_SUSPEND_BW_GROUP);
			else
				bufferevent_suspend_read(&bev->bev,
				    BEV_SUSPEND_BW_GROUP);
			share = 0;
		} else {
			/* XXXX probably we should divide among the active
			 * members, not the total members. */
			share = LIM(g->rate_limit) / g->n_members;
			if (share < g->min_share)
				share = g->min_share;
		}
		UNLOCK_GROUP(g);
		CLAMPTO(share);
	}

	if (max_so_far < 0)
		max_so_far = 0;
	return max_so_far;
}
ev_ssize_t
_bufferevent_get_read_max(struct bufferevent_private *bev)
{
	return _bufferevent_get_rlim_max(bev, 0);
}

ev_ssize_t
_bufferevent_get_write_max(struct bufferevent_private *bev)
{
	return _bufferevent_get_rlim_max(bev, 1);
}
int
_bufferevent_decrement_read_buckets(struct bufferevent_private *bev, ev_ssize_t bytes)
{
	/* XXXXX Make sure all users of this function check its return value */
	int r = 0;
	/* need to hold lock on bev */
	if (!bev->rate_limiting)
		return 0;

	if (bev->rate_limiting->cfg) {
		bev->rate_limiting->limit.read_limit -= bytes;
		if (bev->rate_limiting->limit.read_limit <= 0) {
			bufferevent_suspend_read(&bev->bev, BEV_SUSPEND_BW);
			if (event_add(&bev->rate_limiting->refill_bucket_event,
				&bev->rate_limiting->cfg->tick_timeout) < 0)
				r = -1;
		} else if (bev->read_suspended & BEV_SUSPEND_BW) {
			if (!(bev->write_suspended & BEV_SUSPEND_BW))
				event_del(&bev->rate_limiting->refill_bucket_event);
			bufferevent_unsuspend_read(&bev->bev, BEV_SUSPEND_BW);
		}
	}

	if (bev->rate_limiting->group) {
		LOCK_GROUP(bev->rate_limiting->group);
		bev->rate_limiting->group->rate_limit.read_limit -= bytes;
		bev->rate_limiting->group->total_read += bytes;
		if (bev->rate_limiting->group->rate_limit.read_limit <= 0) {
			_bev_group_suspend_reading(bev->rate_limiting->group);
		} else if (bev->rate_limiting->group->read_suspended) {
			_bev_group_unsuspend_reading(bev->rate_limiting->group);
		}
		UNLOCK_GROUP(bev->rate_limiting->group);
	}

	return r;
}
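
/* Note that the bucket can legitimately go negative here: a single read
 * may overshoot the remaining limit, and the refill callback keeps
 * re-adding itself each tick until the bucket is positive again. */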
int
_bufferevent_decrement_write_buckets(struct bufferevent_private *bev, ev_ssize_t bytes)
{
	/* XXXXX Make sure all users of this function check its return value */
	int r = 0;
	/* need to hold lock */
	if (!bev->rate_limiting)
		return 0;

	if (bev->rate_limiting->cfg) {
		bev->rate_limiting->limit.write_limit -= bytes;
		if (bev->rate_limiting->limit.write_limit <= 0) {
			bufferevent_suspend_write(&bev->bev, BEV_SUSPEND_BW);
			if (event_add(&bev->rate_limiting->refill_bucket_event,
				&bev->rate_limiting->cfg->tick_timeout) < 0)
				r = -1;
		} else if (bev->write_suspended & BEV_SUSPEND_BW) {
			if (!(bev->read_suspended & BEV_SUSPEND_BW))
				event_del(&bev->rate_limiting->refill_bucket_event);
			bufferevent_unsuspend_write(&bev->bev, BEV_SUSPEND_BW);
		}
	}

	if (bev->rate_limiting->group) {
		LOCK_GROUP(bev->rate_limiting->group);
		bev->rate_limiting->group->rate_limit.write_limit -= bytes;
		bev->rate_limiting->group->total_written += bytes;
		if (bev->rate_limiting->group->rate_limit.write_limit <= 0) {
			_bev_group_suspend_writing(bev->rate_limiting->group);
		} else if (bev->rate_limiting->group->write_suspended) {
			_bev_group_unsuspend_writing(bev->rate_limiting->group);
		}
		UNLOCK_GROUP(bev->rate_limiting->group);
	}

	return r;
}
/** Stop reading on every bufferevent in <b>g</b> */
static int
_bev_group_suspend_reading(struct bufferevent_rate_limit_group *g)
{
	/* Needs group lock */
	struct bufferevent_private *bev;
	g->read_suspended = 1;
	g->pending_unsuspend_read = 0;

	/* Note that in this loop we call EVLOCK_TRY_LOCK instead of BEV_LOCK,
	   to prevent a deadlock.  (Ordinarily, the group lock nests inside
	   the bufferevent locks.  If we are unable to lock any individual
	   bufferevent, it will find out later when it looks at its limit
	   and sees that its group is suspended.)
	*/
	TAILQ_FOREACH(bev, &g->members, rate_limiting->next_in_group) {
		if (EVLOCK_TRY_LOCK(bev->lock)) {
			bufferevent_suspend_read(&bev->bev,
			    BEV_SUSPEND_BW_GROUP);
			EVLOCK_UNLOCK(bev->lock, 0);
		}
	}
	return 0;
}
/** Stop writing on every bufferevent in <b>g</b> */
static int
_bev_group_suspend_writing(struct bufferevent_rate_limit_group *g)
{
	/* Needs group lock */
	struct bufferevent_private *bev;
	g->write_suspended = 1;
	g->pending_unsuspend_write = 0;
	TAILQ_FOREACH(bev, &g->members, rate_limiting->next_in_group) {
		if (EVLOCK_TRY_LOCK(bev->lock)) {
			bufferevent_suspend_write(&bev->bev,
			    BEV_SUSPEND_BW_GROUP);
			EVLOCK_UNLOCK(bev->lock, 0);
		}
	}
	return 0;
}
/** Timer callback invoked on a single bufferevent with one or more exhausted
    buckets when they are ready to refill. */
static void
_bev_refill_callback(evutil_socket_t fd, short what, void *arg)
{
	unsigned tick;
	struct timeval now;
	struct bufferevent_private *bev = arg;
	int again = 0;
	BEV_LOCK(&bev->bev);
	if (!bev->rate_limiting || !bev->rate_limiting->cfg) {
		BEV_UNLOCK(&bev->bev);
		return;
	}

	/* First, update the bucket */
	event_base_gettimeofday_cached(bev->bev.ev_base, &now);
	tick = ev_token_bucket_get_tick(&now,
	    bev->rate_limiting->cfg);
	ev_token_bucket_update(&bev->rate_limiting->limit,
	    bev->rate_limiting->cfg,
	    tick);

	/* Now unsuspend any read/write operations as appropriate. */
	if ((bev->read_suspended & BEV_SUSPEND_BW)) {
		if (bev->rate_limiting->limit.read_limit > 0)
			bufferevent_unsuspend_read(&bev->bev, BEV_SUSPEND_BW);
		else
			again = 1;
	}
	if ((bev->write_suspended & BEV_SUSPEND_BW)) {
		if (bev->rate_limiting->limit.write_limit > 0)
			bufferevent_unsuspend_write(&bev->bev, BEV_SUSPEND_BW);
		else
			again = 1;
	}
	if (again) {
		/* One or more of the buckets may need another refill if they
		   started negative.

		   XXXX if we need to be quiet for more ticks, we should
		   maybe figure out what timeout we really want.
		*/
		/* XXXX Handle event_add failure somehow */
		event_add(&bev->rate_limiting->refill_bucket_event,
		    &bev->rate_limiting->cfg->tick_timeout);
	}
	BEV_UNLOCK(&bev->bev);
}
/** Helper: grab a random element from a bufferevent group. */
static struct bufferevent_private *
_bev_group_random_element(struct bufferevent_rate_limit_group *group)
{
	int which;
	struct bufferevent_private *bev;

	/* requires group lock */

	if (!group->n_members)
		return NULL;

	EVUTIL_ASSERT(! TAILQ_EMPTY(&group->members));

	which = _evutil_weakrand() % group->n_members;

	bev = TAILQ_FIRST(&group->members);
	while (which--)
		bev = TAILQ_NEXT(bev, rate_limiting->next_in_group);

	return bev;
}
/** Iterate over the elements of a rate-limiting group 'g' with a random
    starting point, assigning each to the variable 'bev', and executing the
    block 'block'.

    We do this in a half-baked effort to get fairness among group members.
    XXX Round-robin or some kind of priority queue would be even more fair.
 */
#define FOREACH_RANDOM_ORDER(block)			\
	do {						\
		first = _bev_group_random_element(g);	\
		for (bev = first; bev != TAILQ_END(&g->members); \
		    bev = TAILQ_NEXT(bev, rate_limiting->next_in_group)) { \
			block ;				\
		}					\
		for (bev = TAILQ_FIRST(&g->members); bev && bev != first; \
		    bev = TAILQ_NEXT(bev, rate_limiting->next_in_group)) { \
			block ;				\
		}					\
	} while (/*CONSTCOND*/0)
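
/* The two loops above implement a wrap-around traversal: the first walks
 * from the randomly chosen 'first' to the tail of the member list, the
 * second from the head back up to (but not including) 'first', so every
 * member is visited exactly once per invocation. */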
static void
_bev_group_unsuspend_reading(struct bufferevent_rate_limit_group *g)
{
	int again = 0;
	struct bufferevent_private *bev, *first;

	g->read_suspended = 0;
	FOREACH_RANDOM_ORDER({
		if (EVLOCK_TRY_LOCK(bev->lock)) {
			bufferevent_unsuspend_read(&bev->bev,
			    BEV_SUSPEND_BW_GROUP);
			EVLOCK_UNLOCK(bev->lock, 0);
		} else {
			again = 1;
		}
	});
	g->pending_unsuspend_read = again;
}
static void
_bev_group_unsuspend_writing(struct bufferevent_rate_limit_group *g)
{
	int again = 0;
	struct bufferevent_private *bev, *first;

	g->write_suspended = 0;
	FOREACH_RANDOM_ORDER({
		if (EVLOCK_TRY_LOCK(bev->lock)) {
			bufferevent_unsuspend_write(&bev->bev,
			    BEV_SUSPEND_BW_GROUP);
			EVLOCK_UNLOCK(bev->lock, 0);
		} else {
			again = 1;
		}
	});
	g->pending_unsuspend_write = again;
}
/** Callback invoked every tick to add more elements to the group bucket
    and unsuspend group members as needed.
 */
static void
_bev_group_refill_callback(evutil_socket_t fd, short what, void *arg)
{
	struct bufferevent_rate_limit_group *g = arg;
	unsigned tick;
	struct timeval now;

	event_base_gettimeofday_cached(event_get_base(&g->master_refill_event), &now);

	LOCK_GROUP(g);

	tick = ev_token_bucket_get_tick(&now, &g->rate_limit_cfg);
	ev_token_bucket_update(&g->rate_limit, &g->rate_limit_cfg, tick);

	if (g->pending_unsuspend_read ||
	    (g->read_suspended && (g->rate_limit.read_limit >= g->min_share))) {
		_bev_group_unsuspend_reading(g);
	}
	if (g->pending_unsuspend_write ||
	    (g->write_suspended && (g->rate_limit.write_limit >= g->min_share))){
		_bev_group_unsuspend_writing(g);
	}

	/* XXXX Rather than waiting to the next tick to unsuspend stuff
	 * with pending_unsuspend_write/read, we should do it on the
	 * next iteration of the mainloop.
	 */

	UNLOCK_GROUP(g);
}
int
bufferevent_set_rate_limit(struct bufferevent *bev,
    struct ev_token_bucket_cfg *cfg)
{
	struct bufferevent_private *bevp =
	    EVUTIL_UPCAST(bev, struct bufferevent_private, bev);
	int r = -1;
	struct bufferevent_rate_limit *rlim;
	struct timeval now;
	ev_uint32_t tick;
	int reinit = 0, suspended = 0;
	/* XXX reference-count cfg */

	BEV_LOCK(bev);

	if (cfg == NULL) {
		if (bevp->rate_limiting) {
			rlim = bevp->rate_limiting;
			rlim->cfg = NULL;
			bufferevent_unsuspend_read(bev, BEV_SUSPEND_BW);
			bufferevent_unsuspend_write(bev, BEV_SUSPEND_BW);
			if (event_initialized(&rlim->refill_bucket_event))
				event_del(&rlim->refill_bucket_event);
		}
		r = 0;
		goto done;
	}

	event_base_gettimeofday_cached(bev->ev_base, &now);
	tick = ev_token_bucket_get_tick(&now, cfg);

	if (bevp->rate_limiting && bevp->rate_limiting->cfg == cfg) {
		/* no-op */
		r = 0;
		goto done;
	}
	if (bevp->rate_limiting == NULL) {
		rlim = mm_calloc(1, sizeof(struct bufferevent_rate_limit));
		if (!rlim)
			goto done;
		bevp->rate_limiting = rlim;
	} else {
		rlim = bevp->rate_limiting;
	}
	reinit = rlim->cfg != NULL;

	rlim->cfg = cfg;
	ev_token_bucket_init(&rlim->limit, cfg, tick, reinit);

	if (reinit) {
		EVUTIL_ASSERT(event_initialized(&rlim->refill_bucket_event));
		event_del(&rlim->refill_bucket_event);
	}
	evtimer_assign(&rlim->refill_bucket_event, bev->ev_base,
	    _bev_refill_callback, bevp);

	if (rlim->limit.read_limit > 0) {
		bufferevent_unsuspend_read(bev, BEV_SUSPEND_BW);
	} else {
		bufferevent_suspend_read(bev, BEV_SUSPEND_BW);
		suspended = 1;
	}
	if (rlim->limit.write_limit > 0) {
		bufferevent_unsuspend_write(bev, BEV_SUSPEND_BW);
	} else {
		bufferevent_suspend_write(bev, BEV_SUSPEND_BW);
		suspended = 1;
	}

	if (suspended)
		event_add(&rlim->refill_bucket_event, &cfg->tick_timeout);

	r = 0;

done:
	BEV_UNLOCK(bev);
	return r;
}
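
/* Note on the function above: passing cfg == NULL removes the limit, and
 * reapplying the current cfg is a no-op.  A single cfg object may be
 * shared by many bufferevents (each keeps only a pointer), which is what
 * the XXX about reference-counting refers to; as written, the caller must
 * keep cfg alive until no bufferevent uses it, then call
 * ev_token_bucket_cfg_free(). */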
struct bufferevent_rate_limit_group *
bufferevent_rate_limit_group_new(struct event_base *base,
    const struct ev_token_bucket_cfg *cfg)
{
	struct bufferevent_rate_limit_group *g;
	struct timeval now;
	ev_uint32_t tick;

	event_base_gettimeofday_cached(base, &now);
	tick = ev_token_bucket_get_tick(&now, cfg);

	g = mm_calloc(1, sizeof(struct bufferevent_rate_limit_group));
	if (!g)
		return NULL;
	memcpy(&g->rate_limit_cfg, cfg, sizeof(g->rate_limit_cfg));
	TAILQ_INIT(&g->members);

	ev_token_bucket_init(&g->rate_limit, cfg, tick, 0);

	event_assign(&g->master_refill_event, base, -1, EV_PERSIST,
	    _bev_group_refill_callback, g);
	/*XXXX handle event_add failure */
	event_add(&g->master_refill_event, &cfg->tick_timeout);

	EVTHREAD_ALLOC_LOCK(g->lock, EVTHREAD_LOCKTYPE_RECURSIVE);

	bufferevent_rate_limit_group_set_min_share(g, 64);

	return g;
}
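
/* A minimal group-usage sketch (hypothetical names, not part of this
 * file): members of a group share one bucket in addition to any
 * per-bufferevent limit.
 *
 *	struct bufferevent_rate_limit_group *grp =
 *	    bufferevent_rate_limit_group_new(base, cfg);
 *	if (grp)
 *		bufferevent_add_to_rate_limit_group(bev, grp);
 *
 * The default min_share of 64 set above keeps a large group from handing
 * each member a uselessly tiny slice of the bucket per tick. */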
int
bufferevent_rate_limit_group_set_cfg(
	struct bufferevent_rate_limit_group *g,
	const struct ev_token_bucket_cfg *cfg)
{
	int same_tick;
	if (!g || !cfg)
		return -1;

	LOCK_GROUP(g);
	same_tick = evutil_timercmp(
		&g->rate_limit_cfg.tick_timeout, &cfg->tick_timeout, ==);
	memcpy(&g->rate_limit_cfg, cfg, sizeof(g->rate_limit_cfg));

	if (g->rate_limit.read_limit > (ev_ssize_t)cfg->read_maximum)
		g->rate_limit.read_limit = cfg->read_maximum;
	if (g->rate_limit.write_limit > (ev_ssize_t)cfg->write_maximum)
		g->rate_limit.write_limit = cfg->write_maximum;

	if (!same_tick) {
		/* This can cause a hiccup in the schedule */
		event_add(&g->master_refill_event, &cfg->tick_timeout);
	}

	/* The new limits might force us to adjust min_share differently. */
	bufferevent_rate_limit_group_set_min_share(g, g->configured_min_share);

	UNLOCK_GROUP(g);
	return 0;
}
int
bufferevent_rate_limit_group_set_min_share(
	struct bufferevent_rate_limit_group *g,
	size_t share)
{
	if (share > EV_SSIZE_MAX)
		return -1;

	g->configured_min_share = share;

	/* Can't set share to less than the one-tick maximum.  IOW, at steady
	 * state, at least one connection can go per tick. */
	if (share > g->rate_limit_cfg.read_rate)
		share = g->rate_limit_cfg.read_rate;
	if (share > g->rate_limit_cfg.write_rate)
		share = g->rate_limit_cfg.write_rate;

	g->min_share = share;
	return 0;
}
void
bufferevent_rate_limit_group_free(struct bufferevent_rate_limit_group *g)
{
	LOCK_GROUP(g);
	EVUTIL_ASSERT(0 == g->n_members);
	event_del(&g->master_refill_event);
	UNLOCK_GROUP(g);
	EVTHREAD_FREE_LOCK(g->lock, EVTHREAD_LOCKTYPE_RECURSIVE);
	mm_free(g);
}
int
bufferevent_add_to_rate_limit_group(struct bufferevent *bev,
    struct bufferevent_rate_limit_group *g)
{
	int wsuspend, rsuspend;
	struct bufferevent_private *bevp =
	    EVUTIL_UPCAST(bev, struct bufferevent_private, bev);
	BEV_LOCK(bev);

	if (!bevp->rate_limiting) {
		struct bufferevent_rate_limit *rlim;
		rlim = mm_calloc(1, sizeof(struct bufferevent_rate_limit));
		if (!rlim) {
			BEV_UNLOCK(bev);
			return -1;
		}
		evtimer_assign(&rlim->refill_bucket_event, bev->ev_base,
		    _bev_refill_callback, bevp);
		bevp->rate_limiting = rlim;
	}

	if (bevp->rate_limiting->group == g) {
		BEV_UNLOCK(bev);
		return 0;
	}
	if (bevp->rate_limiting->group)
		bufferevent_remove_from_rate_limit_group(bev);

	LOCK_GROUP(g);
	bevp->rate_limiting->group = g;
	++g->n_members;
	TAILQ_INSERT_TAIL(&g->members, bevp, rate_limiting->next_in_group);

	rsuspend = g->read_suspended;
	wsuspend = g->write_suspended;

	UNLOCK_GROUP(g);

	if (rsuspend)
		bufferevent_suspend_read(bev, BEV_SUSPEND_BW_GROUP);
	if (wsuspend)
		bufferevent_suspend_write(bev, BEV_SUSPEND_BW_GROUP);

	BEV_UNLOCK(bev);
	return 0;
}
int
bufferevent_remove_from_rate_limit_group(struct bufferevent *bev)
{
	return bufferevent_remove_from_rate_limit_group_internal(bev, 1);
}
int
bufferevent_remove_from_rate_limit_group_internal(struct bufferevent *bev,
    int unsuspend)
{
	struct bufferevent_private *bevp =
	    EVUTIL_UPCAST(bev, struct bufferevent_private, bev);
	BEV_LOCK(bev);
	if (bevp->rate_limiting && bevp->rate_limiting->group) {
		struct bufferevent_rate_limit_group *g =
		    bevp->rate_limiting->group;
		LOCK_GROUP(g);
		bevp->rate_limiting->group = NULL;
		--g->n_members;
		TAILQ_REMOVE(&g->members, bevp, rate_limiting->next_in_group);
		UNLOCK_GROUP(g);
	}
	if (unsuspend) {
		bufferevent_unsuspend_read(bev, BEV_SUSPEND_BW_GROUP);
		bufferevent_unsuspend_write(bev, BEV_SUSPEND_BW_GROUP);
	}
	BEV_UNLOCK(bev);
	return 0;
}
/* ===
 * API functions to expose rate limits.
 *
 * Don't use these from inside Libevent; they're meant to be for use by
 * the program.
 * === */

/* Mostly you don't want to use this function from inside libevent;
 * _bufferevent_get_read_max() is more likely what you want. */
ev_ssize_t
bufferevent_get_read_limit(struct bufferevent *bev)
{
	ev_ssize_t r;
	struct bufferevent_private *bevp;
	BEV_LOCK(bev);
	bevp = BEV_UPCAST(bev);
	if (bevp->rate_limiting && bevp->rate_limiting->cfg) {
		bufferevent_update_buckets(bevp);
		r = bevp->rate_limiting->limit.read_limit;
	} else {
		r = EV_SSIZE_MAX;
	}
	BEV_UNLOCK(bev);
	return r;
}
/* Mostly you don't want to use this function from inside libevent;
 * _bufferevent_get_write_max() is more likely what you want. */
ev_ssize_t
bufferevent_get_write_limit(struct bufferevent *bev)
{
	ev_ssize_t r;
	struct bufferevent_private *bevp;
	BEV_LOCK(bev);
	bevp = BEV_UPCAST(bev);
	if (bevp->rate_limiting && bevp->rate_limiting->cfg) {
		bufferevent_update_buckets(bevp);
		r = bevp->rate_limiting->limit.write_limit;
	} else {
		r = EV_SSIZE_MAX;
	}
	BEV_UNLOCK(bev);
	return r;
}
ev_ssize_t
bufferevent_get_max_to_read(struct bufferevent *bev)
{
	ev_ssize_t r;
	BEV_LOCK(bev);
	r = _bufferevent_get_read_max(BEV_UPCAST(bev));
	BEV_UNLOCK(bev);
	return r;
}

ev_ssize_t
bufferevent_get_max_to_write(struct bufferevent *bev)
{
	ev_ssize_t r;
	BEV_LOCK(bev);
	r = _bufferevent_get_write_max(BEV_UPCAST(bev));
	BEV_UNLOCK(bev);
	return r;
}
/* Mostly you don't want to use this function from inside libevent;
 * _bufferevent_get_read_max() is more likely what you want. */
ev_ssize_t
bufferevent_rate_limit_group_get_read_limit(
	struct bufferevent_rate_limit_group *grp)
{
	ev_ssize_t r;
	LOCK_GROUP(grp);
	r = grp->rate_limit.read_limit;
	UNLOCK_GROUP(grp);
	return r;
}

/* Mostly you don't want to use this function from inside libevent;
 * _bufferevent_get_write_max() is more likely what you want. */
ev_ssize_t
bufferevent_rate_limit_group_get_write_limit(
	struct bufferevent_rate_limit_group *grp)
{
	ev_ssize_t r;
	LOCK_GROUP(grp);
	r = grp->rate_limit.write_limit;
	UNLOCK_GROUP(grp);
	return r;
}
int
bufferevent_decrement_read_limit(struct bufferevent *bev, ev_ssize_t decr)
{
	int r = 0;
	ev_ssize_t old_limit, new_limit;
	struct bufferevent_private *bevp;
	BEV_LOCK(bev);
	bevp = BEV_UPCAST(bev);
	EVUTIL_ASSERT(bevp->rate_limiting && bevp->rate_limiting->cfg);
	old_limit = bevp->rate_limiting->limit.read_limit;

	new_limit = (bevp->rate_limiting->limit.read_limit -= decr);
	if (old_limit > 0 && new_limit <= 0) {
		bufferevent_suspend_read(bev, BEV_SUSPEND_BW);
		if (event_add(&bevp->rate_limiting->refill_bucket_event,
			&bevp->rate_limiting->cfg->tick_timeout) < 0)
			r = -1;
	} else if (old_limit <= 0 && new_limit > 0) {
		if (!(bevp->write_suspended & BEV_SUSPEND_BW))
			event_del(&bevp->rate_limiting->refill_bucket_event);
		bufferevent_unsuspend_read(bev, BEV_SUSPEND_BW);
	}

	BEV_UNLOCK(bev);
	return r;
}
int
bufferevent_decrement_write_limit(struct bufferevent *bev, ev_ssize_t decr)
{
	/* XXXX this is mostly copy-and-paste from
	 * bufferevent_decrement_read_limit */
	int r = 0;
	ev_ssize_t old_limit, new_limit;
	struct bufferevent_private *bevp;
	BEV_LOCK(bev);
	bevp = BEV_UPCAST(bev);
	EVUTIL_ASSERT(bevp->rate_limiting && bevp->rate_limiting->cfg);
	old_limit = bevp->rate_limiting->limit.write_limit;

	new_limit = (bevp->rate_limiting->limit.write_limit -= decr);
	if (old_limit > 0 && new_limit <= 0) {
		bufferevent_suspend_write(bev, BEV_SUSPEND_BW);
		if (event_add(&bevp->rate_limiting->refill_bucket_event,
			&bevp->rate_limiting->cfg->tick_timeout) < 0)
			r = -1;
	} else if (old_limit <= 0 && new_limit > 0) {
		if (!(bevp->read_suspended & BEV_SUSPEND_BW))
			event_del(&bevp->rate_limiting->refill_bucket_event);
		bufferevent_unsuspend_write(bev, BEV_SUSPEND_BW);
	}

	BEV_UNLOCK(bev);
	return r;
}
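
/* The two decrement functions above let a program charge traffic against
 * a bucket manually (e.g. for bytes sent outside the bufferevent).  A
 * negative decr refills the bucket instead, and crossing zero in either
 * direction toggles suspension, mirroring the internal
 * _bufferevent_decrement_read_buckets() logic. */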
int
bufferevent_rate_limit_group_decrement_read(
	struct bufferevent_rate_limit_group *grp, ev_ssize_t decr)
{
	int r = 0;
	ev_ssize_t old_limit, new_limit;
	LOCK_GROUP(grp);
	old_limit = grp->rate_limit.read_limit;
	new_limit = (grp->rate_limit.read_limit -= decr);

	if (old_limit > 0 && new_limit <= 0) {
		_bev_group_suspend_reading(grp);
	} else if (old_limit <= 0 && new_limit > 0) {
		_bev_group_unsuspend_reading(grp);
	}

	UNLOCK_GROUP(grp);
	return r;
}
int
bufferevent_rate_limit_group_decrement_write(
	struct bufferevent_rate_limit_group *grp, ev_ssize_t decr)
{
	int r = 0;
	ev_ssize_t old_limit, new_limit;
	LOCK_GROUP(grp);
	old_limit = grp->rate_limit.write_limit;
	new_limit = (grp->rate_limit.write_limit -= decr);

	if (old_limit > 0 && new_limit <= 0) {
		_bev_group_suspend_writing(grp);
	} else if (old_limit <= 0 && new_limit > 0) {
		_bev_group_unsuspend_writing(grp);
	}

	UNLOCK_GROUP(grp);
	return r;
}
void
bufferevent_rate_limit_group_get_totals(struct bufferevent_rate_limit_group *grp,
    ev_uint64_t *total_read_out, ev_uint64_t *total_written_out)
{
	EVUTIL_ASSERT(grp != NULL);
	if (total_read_out)
		*total_read_out = grp->total_read;
	if (total_written_out)
		*total_written_out = grp->total_written;
}
void
bufferevent_rate_limit_group_reset_totals(struct bufferevent_rate_limit_group *grp)
{
	grp->total_read = grp->total_written = 0;
}