/*
 * net/sched/sch_sfb.c	  Stochastic Fair Blue
 *
 * Copyright (c) 2008-2011 Juliusz Chroboczek <jch@pps.jussieu.fr>
 * Copyright (c) 2011 Eric Dumazet <eric.dumazet@gmail.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * version 2 as published by the Free Software Foundation.
 *
 * W. Feng, D. Kandlur, D. Saha, K. Shin. Blue:
 * A New Class of Active Queue Management Algorithms.
 * U. Michigan CSE-TR-387-99, April 1999.
 *
 * http://www.thefengs.com/wuchang/blue/CSE-TR-387-99.pdf
 */
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/skbuff.h>
#include <linux/random.h>
#include <linux/jhash.h>
#include <net/ip.h>
#include <net/pkt_sched.h>
#include <net/inet_ecn.h>
/*
 * SFB uses two B[l][n] : L x N arrays of bins (L levels, N bins per level)
 * This implementation uses L = 8 and N = 16
 * This permits us to split one 32bit hash (provided per packet by rxhash or
 * external classifier) into 8 subhashes of 4 bits.
 */
#define SFB_BUCKET_SHIFT 4
#define SFB_NUMBUCKETS	(1 << SFB_BUCKET_SHIFT) /* N bins per Level */
#define SFB_BUCKET_MASK (SFB_NUMBUCKETS - 1)
#define SFB_LEVELS	(32 / SFB_BUCKET_SHIFT) /* L */
#define SFB_MAX_PROB	0xFFFF /* Q0.16 value closest to 1.0 */
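/* Example: a 32bit hash 0xA3C1F52B is consumed 4 bits at a time, least
 * significant nibble first, giving bucket indexes 0xB, 0x2, 0x5, 0xF,
 * 0x1, 0xC, 0x3, 0xA for levels 0..7.
 */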
/* SFB algo uses a virtual queue, named "bin" */
struct sfb_bucket {
	u16		qlen; /* length of virtual queue */
	u16		p_mark; /* marking probability */
};

/* We use a double buffering right before hash change
 * (Section 4.4 of SFB reference : moving hash functions)
 */
struct sfb_bins {
	u32		  perturbation; /* jhash perturbation */
	struct sfb_bucket bins[SFB_LEVELS][SFB_NUMBUCKETS];
};
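/* Two bin sets exist so the hash can change without losing state: while
 * double buffering is on, packets of inelastic flows also update the
 * standby set, so those flows are still recognized right after the swap.
 */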
struct sfb_sched_data {
	struct Qdisc	*qdisc;
	struct tcf_proto *filter_list;
	unsigned long	rehash_interval;
	unsigned long	warmup_time;	/* double buffering warmup time in jiffies */
	u32		max;
	u32		bin_size;	/* maximum queue length per bin */
	u32		increment;	/* d1 */
	u32		decrement;	/* d2 */
	u32		limit;		/* HARD maximal queue length */
	u32		penalty_rate;
	u32		penalty_burst;
	u32		tokens_avail;
	unsigned long	rehash_time;
	unsigned long	token_time;

	u8		slot;		/* current active bins (0 or 1) */
	bool		double_buffering;
	struct sfb_bins bins[2];

	struct {
		u32	earlydrop;
		u32	penaltydrop;
		u32	bucketdrop;
		u32	queuedrop;
		u32	childdrop;	/* drops in child qdisc */
		u32	marked;		/* ECN mark */
	} stats;
};
/*
 * Each queued skb might be hashed on one or two bins
 * We store in skb_cb the two hash values.
 * (A zero value means double buffering was not used)
 */
struct sfb_skb_cb {
	u32 hashes[2];
};

static inline struct sfb_skb_cb *sfb_skb_cb(const struct sk_buff *skb)
{
	BUILD_BUG_ON(sizeof(skb->cb) <
		sizeof(struct qdisc_skb_cb) + sizeof(struct sfb_skb_cb));
	return (struct sfb_skb_cb *)qdisc_skb_cb(skb)->data;
}
/*
 * If using 'internal' SFB flow classifier, hash comes from skb rxhash
 * If using external classifier, hash comes from the classid.
 */
static u32 sfb_hash(const struct sk_buff *skb, u32 slot)
{
	return sfb_skb_cb(skb)->hashes[slot];
}
/* Probabilities are coded as Q0.16 fixed-point values,
 * with 0xFFFF representing 65535/65536 (almost 1.0)
 * Addition and subtraction are saturating in [0, 65535]
 */
static u32 prob_plus(u32 p1, u32 p2)
{
	u32 res = p1 + p2;

	return min_t(u32, res, SFB_MAX_PROB);
}

static u32 prob_minus(u32 p1, u32 p2)
{
	return p1 > p2 ? p1 - p2 : 0;
}
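/* Q0.16 example: p = 0x8000 represents 32768/65536 = 0.5;
 * prob_plus(0xFFFF, 1) saturates at SFB_MAX_PROB (0xFFFF) and
 * prob_minus(0, 1) saturates at 0.
 */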
static void increment_one_qlen(u32 sfbhash, u32 slot, struct sfb_sched_data *q)
{
	int i;
	struct sfb_bucket *b = &q->bins[slot].bins[0][0];

	for (i = 0; i < SFB_LEVELS; i++) {
		u32 hash = sfbhash & SFB_BUCKET_MASK;

		sfbhash >>= SFB_BUCKET_SHIFT;
		if (b[hash].qlen < 0xFFFF)
			b[hash].qlen++;
		b += SFB_NUMBUCKETS; /* next level */
	}
}
static void increment_qlen(const struct sk_buff *skb, struct sfb_sched_data *q)
{
	u32 sfbhash;

	sfbhash = sfb_hash(skb, 0);
	if (sfbhash)
		increment_one_qlen(sfbhash, 0, q);

	sfbhash = sfb_hash(skb, 1);
	if (sfbhash)
		increment_one_qlen(sfbhash, 1, q);
}
static void decrement_one_qlen(u32 sfbhash, u32 slot,
			       struct sfb_sched_data *q)
{
	int i;
	struct sfb_bucket *b = &q->bins[slot].bins[0][0];

	for (i = 0; i < SFB_LEVELS; i++) {
		u32 hash = sfbhash & SFB_BUCKET_MASK;

		sfbhash >>= SFB_BUCKET_SHIFT;
		if (b[hash].qlen > 0)
			b[hash].qlen--;
		b += SFB_NUMBUCKETS; /* next level */
	}
}
static void decrement_qlen(const struct sk_buff *skb, struct sfb_sched_data *q)
{
	u32 sfbhash;

	sfbhash = sfb_hash(skb, 0);
	if (sfbhash)
		decrement_one_qlen(sfbhash, 0, q);

	sfbhash = sfb_hash(skb, 1);
	if (sfbhash)
		decrement_one_qlen(sfbhash, 1, q);
}
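/* hashes[0] / hashes[1] record, per packet, which bin sets were touched
 * at enqueue time; a zero entry (see sfb_skb_cb) means that slot was not
 * used, so virtual qlen stays balanced even across a slot swap.
 */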
static void decrement_prob(struct sfb_bucket *b, struct sfb_sched_data *q)
{
	b->p_mark = prob_minus(b->p_mark, q->decrement);
}

static void increment_prob(struct sfb_bucket *b, struct sfb_sched_data *q)
{
	b->p_mark = prob_plus(b->p_mark, q->increment);
}

static void sfb_zero_all_buckets(struct sfb_sched_data *q)
{
	memset(&q->bins, 0, sizeof(q->bins));
}
/*
 * compute max qlen, max p_mark, and avg p_mark
 */
static u32 sfb_compute_qlen(u32 *prob_r, u32 *avgpm_r, const struct sfb_sched_data *q)
{
	int i;
	u32 qlen = 0, prob = 0, totalpm = 0;
	const struct sfb_bucket *b = &q->bins[q->slot].bins[0][0];

	for (i = 0; i < SFB_LEVELS * SFB_NUMBUCKETS; i++) {
		if (qlen < b->qlen)
			qlen = b->qlen;
		totalpm += b->p_mark;
		if (prob < b->p_mark)
			prob = b->p_mark;
		b++;
	}
	*prob_r = prob;
	*avgpm_r = totalpm / (SFB_LEVELS * SFB_NUMBUCKETS);
	return qlen;
}
static void sfb_init_perturbation(u32 slot, struct sfb_sched_data *q)
{
	q->bins[slot].perturbation = net_random();
}

static void sfb_swap_slot(struct sfb_sched_data *q)
{
	sfb_init_perturbation(q->slot, q);
	q->slot ^= 1;
	q->double_buffering = false;
}
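/* The retiring slot gets a fresh perturbation here, so the next
 * double-buffering phase warms it with a hash no flow has been
 * measured under yet.
 */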
/* Non elastic flows are allowed to use part of the bandwidth, expressed
 * in "penalty_rate" packets per second, with "penalty_burst" burst
 */
static bool sfb_rate_limit(struct sk_buff *skb, struct sfb_sched_data *q)
{
	if (q->penalty_rate == 0 || q->penalty_burst == 0)
		return true;

	if (q->tokens_avail < 1) {
		unsigned long age = min(10UL * HZ, jiffies - q->token_time);

		q->tokens_avail = (age * q->penalty_rate) / HZ;
		if (q->tokens_avail > q->penalty_burst)
			q->tokens_avail = q->penalty_burst;
		q->token_time = jiffies;
		if (q->tokens_avail < 1)
			return true;
	}

	q->tokens_avail--;
	return false;
}
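/* Token bucket example: with penalty_rate = 10 and penalty_burst = 20
 * (the defaults below), one second of idle time refills 10 tokens;
 * five seconds would yield 50 but is clamped to the burst size of 20.
 */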
static bool sfb_classify(struct sk_buff *skb, struct sfb_sched_data *q,
			 int *qerr, u32 *salt)
{
	struct tcf_result res;
	int result;

	result = tc_classify(skb, q->filter_list, &res);
	if (result >= 0) {
#ifdef CONFIG_NET_CLS_ACT
		switch (result) {
		case TC_ACT_STOLEN:
		case TC_ACT_QUEUED:
			*qerr = NET_XMIT_SUCCESS | __NET_XMIT_STOLEN;
			/* fall through */
		case TC_ACT_SHOT:
			return false;
		}
#endif
		*salt = TC_H_MIN(res.classid);
		return true;
	}
	return false;
}
static int sfb_enqueue(struct sk_buff *skb, struct Qdisc *sch)
{
	struct sfb_sched_data *q = qdisc_priv(sch);
	struct Qdisc *child = q->qdisc;
	int i;
	u32 p_min = ~0;
	u32 minqlen = ~0;
	u32 r, slot, salt, sfbhash;
	int ret = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;

	if (unlikely(sch->q.qlen >= q->limit)) {
		sch->qstats.overlimits++;
		q->stats.queuedrop++;
		goto drop;
	}

	if (q->rehash_interval > 0) {
		unsigned long limit = q->rehash_time + q->rehash_interval;

		if (unlikely(time_after(jiffies, limit))) {
			sfb_swap_slot(q);
			q->rehash_time = jiffies;
		} else if (unlikely(!q->double_buffering && q->warmup_time > 0 &&
				    time_after(jiffies, limit - q->warmup_time))) {
			q->double_buffering = true;
		}
	}
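	/* Per rehash_interval the qdisc thus runs single-slot most of the
	 * time, turns double buffering on for the final warmup_time of the
	 * period, then swaps slots and starts over.
	 */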
	if (q->filter_list) {
		/* If using external classifiers, get result and record it. */
		if (!sfb_classify(skb, q, &ret, &salt))
			goto other_drop;
	} else {
		salt = skb_get_rxhash(skb);
	}

	slot = q->slot;

	sfbhash = jhash_1word(salt, q->bins[slot].perturbation);
	if (!sfbhash)
		sfbhash = 1;
	sfb_skb_cb(skb)->hashes[slot] = sfbhash;

	for (i = 0; i < SFB_LEVELS; i++) {
		u32 hash = sfbhash & SFB_BUCKET_MASK;
		struct sfb_bucket *b = &q->bins[slot].bins[i][hash];

		sfbhash >>= SFB_BUCKET_SHIFT;
		if (b->qlen == 0)
			decrement_prob(b, q);
		else if (b->qlen >= q->bin_size)
			increment_prob(b, q);
		if (minqlen > b->qlen)
			minqlen = b->qlen;
		if (p_min > b->p_mark)
			p_min = b->p_mark;
	}

	slot ^= 1;
	sfb_skb_cb(skb)->hashes[slot] = 0;
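	/* As in a Bloom filter, other flows can inflate any single bucket's
	 * qlen and p_mark, so the minimum over the L levels computed above
	 * is the least-contaminated estimate for this flow.
	 */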
	if (unlikely(minqlen >= q->max)) {
		sch->qstats.overlimits++;
		q->stats.bucketdrop++;
		goto drop;
	}

	if (unlikely(p_min >= SFB_MAX_PROB)) {
		/* Inelastic flow */
		if (q->double_buffering) {
			sfbhash = jhash_1word(salt, q->bins[slot].perturbation);
			if (!sfbhash)
				sfbhash = 1;
			sfb_skb_cb(skb)->hashes[slot] = sfbhash;

			for (i = 0; i < SFB_LEVELS; i++) {
				u32 hash = sfbhash & SFB_BUCKET_MASK;
				struct sfb_bucket *b = &q->bins[slot].bins[i][hash];

				sfbhash >>= SFB_BUCKET_SHIFT;
				if (b->qlen == 0)
					decrement_prob(b, q);
				else if (b->qlen >= q->bin_size)
					increment_prob(b, q);
			}
		}
		if (sfb_rate_limit(skb, q)) {
			sch->qstats.overlimits++;
			q->stats.penaltydrop++;
			goto drop;
		}
		goto enqueue;
	}
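	/* From here on p_min < SFB_MAX_PROB: mark with probability p_min,
	 * and beyond p_min = 1/2 also drop, scaled to reach certainty at
	 * p_min = 1. Example: p_min = 0xC000 (0.75) early-drops ~50% of
	 * this flow's packets and ECN-marks another ~25%.
	 */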
	r = net_random() & SFB_MAX_PROB;

	if (unlikely(r < p_min)) {
		if (unlikely(p_min > SFB_MAX_PROB / 2)) {
			/* If we're marking that many packets, then either
			 * this flow is unresponsive, or we're badly congested.
			 * In either case, we want to start dropping packets.
			 */
			if (r < (p_min - SFB_MAX_PROB / 2) * 2) {
				q->stats.earlydrop++;
				goto drop;
			}
		}
		if (INET_ECN_set_ce(skb)) {
			q->stats.marked++;
		} else {
			q->stats.earlydrop++;
			goto drop;
		}
	}
enqueue:
	ret = qdisc_enqueue(skb, child);
	if (likely(ret == NET_XMIT_SUCCESS)) {
		sch->q.qlen++;
		increment_qlen(skb, q);
	} else if (net_xmit_drop_count(ret)) {
		q->stats.childdrop++;
		sch->qstats.drops++;
	}
	return ret;

drop:
	qdisc_drop(skb, sch);
	return NET_XMIT_CN;
other_drop:
	if (ret & __NET_XMIT_BYPASS)
		sch->qstats.drops++;
	kfree_skb(skb);
	return ret;
}
static struct sk_buff *sfb_dequeue(struct Qdisc *sch)
{
	struct sfb_sched_data *q = qdisc_priv(sch);
	struct Qdisc *child = q->qdisc;
	struct sk_buff *skb;

	skb = child->dequeue(q->qdisc);

	if (skb) {
		qdisc_bstats_update(sch, skb);
		sch->q.qlen--;
		decrement_qlen(skb, q);
	}

	return skb;
}
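/* Dequeue is where the virtual queues drain: decrement_qlen() lets a
 * bin return to qlen == 0, and the next enqueue through that bin will
 * then lower its p_mark via decrement_prob().
 */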
static struct sk_buff *sfb_peek(struct Qdisc *sch)
{
	struct sfb_sched_data *q = qdisc_priv(sch);
	struct Qdisc *child = q->qdisc;

	return child->ops->peek(child);
}

/* No sfb_drop -- impossible since the child doesn't return the dropped skb. */
static void sfb_reset(struct Qdisc *sch)
{
	struct sfb_sched_data *q = qdisc_priv(sch);

	qdisc_reset(q->qdisc);
	sch->q.qlen = 0;
	q->slot = 0;
	q->double_buffering = false;
	sfb_zero_all_buckets(q);
	sfb_init_perturbation(0, q);
}
static void sfb_destroy(struct Qdisc *sch)
{
	struct sfb_sched_data *q = qdisc_priv(sch);

	tcf_destroy_chain(&q->filter_list);
	qdisc_destroy(q->qdisc);
}
static const struct nla_policy sfb_policy[TCA_SFB_MAX + 1] = {
	[TCA_SFB_PARMS]	= { .len = sizeof(struct tc_sfb_qopt) },
};

static const struct tc_sfb_qopt sfb_default_ops = {
	.rehash_interval = 600 * MSEC_PER_SEC,
	.warmup_time = 60 * MSEC_PER_SEC,
	.limit = 0,
	.max = 25,
	.bin_size = 20,
	.increment = (SFB_MAX_PROB + 500) / 1000, /* 0.1 % */
	.decrement = (SFB_MAX_PROB + 3000) / 6000,
	.penalty_rate = 10,
	.penalty_burst = 20,
};
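/* With SFB_MAX_PROB == 0xFFFF these defaults give increment = 66
 * (~0.1% in Q0.16) and decrement = 11 (~0.017%), so p_mark rises
 * about six times faster than it falls.
 */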
static int sfb_change(struct Qdisc *sch, struct nlattr *opt)
{
	struct sfb_sched_data *q = qdisc_priv(sch);
	struct Qdisc *child;
	struct nlattr *tb[TCA_SFB_MAX + 1];
	const struct tc_sfb_qopt *ctl = &sfb_default_ops;
	u32 limit;
	int err;

	if (opt) {
		err = nla_parse_nested(tb, TCA_SFB_MAX, opt, sfb_policy);
		if (err < 0)
			return -EINVAL;

		if (tb[TCA_SFB_PARMS] == NULL)
			return -EINVAL;

		ctl = nla_data(tb[TCA_SFB_PARMS]);
	}

	limit = ctl->limit;
	if (limit == 0)
		limit = max_t(u32, qdisc_dev(sch)->tx_queue_len, 1);

	child = fifo_create_dflt(sch, &pfifo_qdisc_ops, limit);
	if (IS_ERR(child))
		return PTR_ERR(child);

	sch_tree_lock(sch);

	qdisc_tree_decrease_qlen(q->qdisc, q->qdisc->q.qlen);
	qdisc_destroy(q->qdisc);
	q->qdisc = child;

	q->rehash_interval = msecs_to_jiffies(ctl->rehash_interval);
	q->warmup_time = msecs_to_jiffies(ctl->warmup_time);
	q->rehash_time = jiffies;
	q->limit = limit;
	q->increment = ctl->increment;
	q->decrement = ctl->decrement;
	q->max = ctl->max;
	q->bin_size = ctl->bin_size;
	q->penalty_rate = ctl->penalty_rate;
	q->penalty_burst = ctl->penalty_burst;
	q->tokens_avail = ctl->penalty_burst;
	q->token_time = jiffies;

	q->slot = 0;
	q->double_buffering = false;
	sfb_zero_all_buckets(q);
	sfb_init_perturbation(0, q);
	sfb_init_perturbation(1, q);

	sch_tree_unlock(sch);

	return 0;
}
static int sfb_init(struct Qdisc *sch, struct nlattr *opt)
{
	struct sfb_sched_data *q = qdisc_priv(sch);

	q->qdisc = &noop_qdisc;
	return sfb_change(sch, opt);
}
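/* Example setup from user space (a sketch; option spelling depends on
 * the iproute2 build):
 *   tc qdisc add dev eth0 root sfb
 * Loading with no attributes makes sfb_change() fall back to
 * sfb_default_ops and a pfifo child sized from tx_queue_len.
 */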
static int sfb_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	struct sfb_sched_data *q = qdisc_priv(sch);
	struct nlattr *opts;
	struct tc_sfb_qopt opt = {
		.rehash_interval = jiffies_to_msecs(q->rehash_interval),
		.warmup_time = jiffies_to_msecs(q->warmup_time),
		.limit = q->limit,
		.max = q->max,
		.bin_size = q->bin_size,
		.increment = q->increment,
		.decrement = q->decrement,
		.penalty_rate = q->penalty_rate,
		.penalty_burst = q->penalty_burst,
	};

	sch->qstats.backlog = q->qdisc->qstats.backlog;
	opts = nla_nest_start(skb, TCA_OPTIONS);
	if (opts == NULL)
		goto nla_put_failure;
	NLA_PUT(skb, TCA_SFB_PARMS, sizeof(opt), &opt);
	return nla_nest_end(skb, opts);

nla_put_failure:
	nla_nest_cancel(skb, opts);
	return -EMSGSIZE;
}
static int sfb_dump_stats(struct Qdisc *sch, struct gnet_dump *d)
{
	struct sfb_sched_data *q = qdisc_priv(sch);
	struct tc_sfb_xstats st = {
		.earlydrop = q->stats.earlydrop,
		.penaltydrop = q->stats.penaltydrop,
		.bucketdrop = q->stats.bucketdrop,
		.queuedrop = q->stats.queuedrop,
		.childdrop = q->stats.childdrop,
		.marked = q->stats.marked,
	};

	st.maxqlen = sfb_compute_qlen(&st.maxprob, &st.avgprob, q);

	return gnet_stats_copy_app(d, &st, sizeof(st));
}
static int sfb_dump_class(struct Qdisc *sch, unsigned long cl,
			  struct sk_buff *skb, struct tcmsg *tcm)
{
	return -ENOSYS;
}
static int sfb_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
		     struct Qdisc **old)
{
	struct sfb_sched_data *q = qdisc_priv(sch);

	if (new == NULL)
		new = &noop_qdisc;

	sch_tree_lock(sch);
	*old = q->qdisc;
	q->qdisc = new;
	qdisc_tree_decrease_qlen(*old, (*old)->q.qlen);
	qdisc_reset(*old);
	sch_tree_unlock(sch);
	return 0;
}
static struct Qdisc *sfb_leaf(struct Qdisc *sch, unsigned long arg)
{
	struct sfb_sched_data *q = qdisc_priv(sch);

	return q->qdisc;
}

static unsigned long sfb_get(struct Qdisc *sch, u32 classid)
{
	return 1;
}

static void sfb_put(struct Qdisc *sch, unsigned long arg)
{
}

static int sfb_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
			    struct nlattr **tca, unsigned long *arg)
{
	return -ENOSYS;
}

static int sfb_delete(struct Qdisc *sch, unsigned long cl)
{
	return -ENOSYS;
}
static void sfb_walk(struct Qdisc *sch, struct qdisc_walker *walker)
{
	if (!walker->stop) {
		if (walker->count >= walker->skip)
			if (walker->fn(sch, 1, walker) < 0) {
				walker->stop = 1;
				return;
			}
		walker->count++;
	}
}
static struct tcf_proto **sfb_find_tcf(struct Qdisc *sch, unsigned long cl)
{
	struct sfb_sched_data *q = qdisc_priv(sch);

	if (cl)
		return NULL;
	return &q->filter_list;
}

static unsigned long sfb_bind(struct Qdisc *sch, unsigned long parent,
			      u32 classid)
{
	return 0;
}
static const struct Qdisc_class_ops sfb_class_ops = {
	.graft		= sfb_graft,
	.leaf		= sfb_leaf,
	.get		= sfb_get,
	.put		= sfb_put,
	.change		= sfb_change_class,
	.delete		= sfb_delete,
	.walk		= sfb_walk,
	.tcf_chain	= sfb_find_tcf,
	.bind_tcf	= sfb_bind,
	.unbind_tcf	= sfb_put,
	.dump		= sfb_dump_class,
};
static struct Qdisc_ops sfb_qdisc_ops __read_mostly = {
	.id		= "sfb",
	.priv_size	= sizeof(struct sfb_sched_data),
	.cl_ops		= &sfb_class_ops,
	.enqueue	= sfb_enqueue,
	.dequeue	= sfb_dequeue,
	.peek		= sfb_peek,
	.init		= sfb_init,
	.reset		= sfb_reset,
	.destroy	= sfb_destroy,
	.change		= sfb_change,
	.dump		= sfb_dump,
	.dump_stats	= sfb_dump_stats,
	.owner		= THIS_MODULE,
};
static int __init sfb_module_init(void)
{
	return register_qdisc(&sfb_qdisc_ops);
}

static void __exit sfb_module_exit(void)
{
	unregister_qdisc(&sfb_qdisc_ops);
}

module_init(sfb_module_init)
module_exit(sfb_module_exit)

MODULE_DESCRIPTION("Stochastic Fair Blue queue discipline");
MODULE_AUTHOR("Juliusz Chroboczek");
MODULE_AUTHOR("Eric Dumazet");
MODULE_LICENSE("GPL");