/*
 * net/sched/sch_choke.c	CHOKE scheduler
 *
 * Copyright (c) 2011 Stephen Hemminger <shemminger@vyatta.com>
 * Copyright (c) 2011 Eric Dumazet <eric.dumazet@gmail.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * version 2 as published by the Free Software Foundation.
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/skbuff.h>
#include <linux/reciprocal_div.h>
#include <linux/vmalloc.h>
#include <net/pkt_sched.h>
#include <net/inet_ecn.h>
#include <net/red.h>
#include <net/flow_keys.h>

/*
   CHOKe stateless AQM for fair bandwidth allocation
   =================================================

   CHOKe (CHOose and Keep for responsive flows, CHOose and Kill for
   unresponsive flows) is a variant of RED that penalizes misbehaving flows but
   maintains no flow state. The difference from RED is an additional step
   during the enqueuing process. If average queue size is over the
   low threshold (qmin), a packet is chosen at random from the queue.
   If both the new and chosen packet are from the same flow, both
   are dropped. Unlike RED, CHOKe is not really a "classful" qdisc because it
   needs to access packets in queue randomly. It has a minimal class
   interface to allow overriding the builtin flow classifier with
   filters.

   Source:
   R. Pan, B. Prabhakar, and K. Psounis, "CHOKe, A Stateless
   Active Queue Management Scheme for Approximating Fair Bandwidth Allocation",
   IEEE INFOCOM, 2000.

   A. Tang, J. Wang, S. Low, "Understanding CHOKe: Throughput and Spatial
   Characteristics", IEEE/ACM Transactions on Networking, 2004
 */
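
/*
 * Example (illustrative) setup via iproute2; the option set mirrors tc-red,
 * but limit/min/max are counted in packets here, not bytes.  Exact option
 * names depend on the iproute2 version:
 *
 *   tc qdisc add dev eth0 root choke limit 1000 min 100 max 300 \
 *      avpkt 1000 burst 150 bandwidth 10mbit ecn
 */
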
/* Upper bound on size of sk_buff table (packets) */
#define CHOKE_MAX_QUEUE	(128*1024 - 1)

struct choke_sched_data {
/* Parameters */
        u32              limit;
        unsigned char    flags;

        struct red_parms parms;

/* Variables */
        struct red_vars  vars;
        struct tcf_proto *filter_list;
        struct {
                u32     prob_drop;      /* Early probability drops */
                u32     prob_mark;      /* Early probability marks */
                u32     forced_drop;    /* Forced drops, qavg > max_thresh */
                u32     forced_mark;    /* Forced marks, qavg > max_thresh */
                u32     pdrop;          /* Drops due to queue limits */
                u32     other;          /* Drops due to drop() calls */
                u32     matched;        /* Drops due to flow match */
        } stats;

        unsigned int     head;
        unsigned int     tail;

        unsigned int     tab_mask; /* size - 1 */

        struct sk_buff **tab;
};
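
/*
 * The packet table is a power-of-two circular buffer indexed through
 * tab_mask.  Random CHOKe drops leave NULL "holes" in the ring, which are
 * compacted lazily by choke_zap_head_holes()/choke_zap_tail_holes(), so
 * choke_len() counts slots between head and tail, holes included.
 */
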
/* deliver a random number between 0 and N - 1 */
static u32 random_N(unsigned int N)
{
        return reciprocal_divide(random32(), N);
}
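
/*
 * reciprocal_divide(x, N) evaluates (u64)x * N >> 32, so feeding it a
 * uniform 32-bit random value yields a near-uniform index in [0, N - 1]
 * without a hardware divide.
 */
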
/* number of elements in queue including holes */
static unsigned int choke_len(const struct choke_sched_data *q)
{
        return (q->tail - q->head) & q->tab_mask;
}

/* Is ECN parameter configured */
static int use_ecn(const struct choke_sched_data *q)
{
        return q->flags & TC_RED_ECN;
}

/* Should packets over max just be dropped (versus marked) */
static int use_harddrop(const struct choke_sched_data *q)
{
        return q->flags & TC_RED_HARDDROP;
}

/* Move head pointer forward to skip over holes */
static void choke_zap_head_holes(struct choke_sched_data *q)
{
        do {
                q->head = (q->head + 1) & q->tab_mask;
                if (q->head == q->tail)
                        break;
        } while (q->tab[q->head] == NULL);
}

/* Move tail pointer backwards to reuse holes */
static void choke_zap_tail_holes(struct choke_sched_data *q)
{
        do {
                q->tail = (q->tail - 1) & q->tab_mask;
                if (q->head == q->tail)
                        break;
        } while (q->tab[q->tail] == NULL);
}

/* Drop packet from queue array by creating a "hole" */
static void choke_drop_by_idx(struct Qdisc *sch, unsigned int idx)
{
        struct choke_sched_data *q = qdisc_priv(sch);
        struct sk_buff *skb = q->tab[idx];

        q->tab[idx] = NULL;

        if (idx == q->head)
                choke_zap_head_holes(q);
        if (idx == q->tail)
                choke_zap_tail_holes(q);

        sch->qstats.backlog -= qdisc_pkt_len(skb);
        qdisc_drop(skb, sch);
        qdisc_tree_decrease_qlen(sch, 1);
        --sch->q.qlen;
}

struct choke_skb_cb {
        u16                     classid;
        u8                      keys_valid;
        struct flow_keys        keys;
};

static inline struct choke_skb_cb *choke_skb_cb(const struct sk_buff *skb)
{
        qdisc_cb_private_validate(skb, sizeof(struct choke_skb_cb));
        return (struct choke_skb_cb *)qdisc_skb_cb(skb)->data;
}

static inline void choke_set_classid(struct sk_buff *skb, u16 classid)
{
        choke_skb_cb(skb)->classid = classid;
}

static u16 choke_get_classid(const struct sk_buff *skb)
{
        return choke_skb_cb(skb)->classid;
}
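
/*
 * choke_match_flow() below dissects flow keys lazily and caches them in
 * the skb cb (keys_valid), so a queued packet is dissected at most once,
 * no matter how many times it is drawn for comparison.
 */
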
/*
 * Compare flow of two packets
 *  Returns true only if source and destination address and port match.
 *          false for special cases
 */
static bool choke_match_flow(struct sk_buff *skb1,
                             struct sk_buff *skb2)
{
        if (skb1->protocol != skb2->protocol)
                return false;

        if (!choke_skb_cb(skb1)->keys_valid) {
                choke_skb_cb(skb1)->keys_valid = 1;
                skb_flow_dissect(skb1, &choke_skb_cb(skb1)->keys);
        }

        if (!choke_skb_cb(skb2)->keys_valid) {
                choke_skb_cb(skb2)->keys_valid = 1;
                skb_flow_dissect(skb2, &choke_skb_cb(skb2)->keys);
        }

        return !memcmp(&choke_skb_cb(skb1)->keys,
                       &choke_skb_cb(skb2)->keys,
                       sizeof(struct flow_keys));
}

/*
 * Classify flow using either:
 *  1. pre-existing classification result in skb
 *  2. fast internal classification
 *  3. use TC filter based classification
 */
static bool choke_classify(struct sk_buff *skb,
                           struct Qdisc *sch, int *qerr)
{
        struct choke_sched_data *q = qdisc_priv(sch);
        struct tcf_result res;
        int result;

        result = tc_classify(skb, q->filter_list, &res);
        if (result >= 0) {
#ifdef CONFIG_NET_CLS_ACT
                switch (result) {
                case TC_ACT_STOLEN:
                case TC_ACT_QUEUED:
                        *qerr = NET_XMIT_SUCCESS | __NET_XMIT_STOLEN;
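                        /* fall through - stolen/queued packets are not enqueued either */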
                case TC_ACT_SHOT:
                        return false;
                }
#endif
                choke_set_classid(skb, TC_H_MIN(res.classid));
                return true;
        }

        return false;
}

/*
 * Select a packet at random from queue
 * HACK: since queue can have holes from previous deletion; retry several
 *   times to find a random skb but then just give up and return the head
 * Will return NULL if queue is empty (q->head == q->tail)
 */
static struct sk_buff *choke_peek_random(const struct choke_sched_data *q,
                                         unsigned int *pidx)
{
        struct sk_buff *skb;
        int retrys = 3;

        do {
                *pidx = (q->head + random_N(choke_len(q))) & q->tab_mask;
                skb = q->tab[*pidx];
                if (skb)
                        return skb;
        } while (--retrys > 0);
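
        /*
         * All retries hit holes: fall back to the head slot, which is
         * never a hole while the queue is non-empty.
         */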
        return q->tab[*pidx = q->head];
}

/*
 * Compare new packet with random packet in queue
 * returns true if matched and sets *pidx
 */
static bool choke_match_random(const struct choke_sched_data *q,
                               struct sk_buff *nskb,
                               unsigned int *pidx)
{
        struct sk_buff *oskb;

        if (q->head == q->tail)
                return false;

        oskb = choke_peek_random(q, pidx);
        if (q->filter_list)
                return choke_get_classid(nskb) == choke_get_classid(oskb);

        return choke_match_flow(oskb, nskb);
}
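
/*
 * Enqueue decision ladder, in order:
 *   1. qavg <= qth_min: admit and reset the probability counter.
 *   2. CHOKe step: if a randomly drawn queued packet matches the new
 *      packet's flow, drop both.
 *   3. qavg > qth_max: forced mark (ECN) or drop.
 *   4. otherwise: probabilistic mark/drop as in plain RED.
 * A packet surviving all steps is admitted while qlen < limit.
 */
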
static int choke_enqueue(struct sk_buff *skb, struct Qdisc *sch)
{
        struct choke_sched_data *q = qdisc_priv(sch);
        const struct red_parms *p = &q->parms;
        int ret = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;

        if (q->filter_list) {
                /* If using external classifiers, get result and record it. */
                if (!choke_classify(skb, sch, &ret))
                        goto other_drop;        /* Packet was eaten by filter */
        }

        choke_skb_cb(skb)->keys_valid = 0;
        /* Compute average queue usage (see RED) */
        q->vars.qavg = red_calc_qavg(p, &q->vars, sch->q.qlen);
        if (red_is_idling(&q->vars))
                red_end_of_idle_period(&q->vars);

        /* Is queue small? */
        if (q->vars.qavg <= p->qth_min)
                q->vars.qcount = -1;
        else {
                unsigned int idx;

                /* Draw a packet at random from queue and compare flow */
                if (choke_match_random(q, skb, &idx)) {
                        q->stats.matched++;
                        choke_drop_by_idx(sch, idx);
                        goto congestion_drop;
                }

                /* Queue is large, always mark/drop */
                if (q->vars.qavg > p->qth_max) {
                        q->vars.qcount = -1;

                        sch->qstats.overlimits++;
                        if (use_harddrop(q) || !use_ecn(q) ||
                            !INET_ECN_set_ce(skb)) {
                                q->stats.forced_drop++;
                                goto congestion_drop;
                        }

                        q->stats.forced_mark++;
                } else if (++q->vars.qcount) {
                        if (red_mark_probability(p, &q->vars, q->vars.qavg)) {
                                q->vars.qcount = 0;
                                q->vars.qR = red_random(p);

                                sch->qstats.overlimits++;
                                if (!use_ecn(q) || !INET_ECN_set_ce(skb)) {
                                        q->stats.prob_drop++;
                                        goto congestion_drop;
                                }

                                q->stats.prob_mark++;
                        }
                } else
                        q->vars.qR = red_random(p);
        }

        /* Admit new packet */
        if (sch->q.qlen < q->limit) {
                q->tab[q->tail] = skb;
                q->tail = (q->tail + 1) & q->tab_mask;
                ++sch->q.qlen;
                sch->qstats.backlog += qdisc_pkt_len(skb);
                return NET_XMIT_SUCCESS;
        }

        q->stats.pdrop++;
        sch->qstats.drops++;
        kfree_skb(skb);
        return NET_XMIT_DROP;

congestion_drop:
        qdisc_drop(skb, sch);
        return NET_XMIT_CN;

other_drop:
        if (ret & __NET_XMIT_BYPASS)
                sch->qstats.drops++;
        kfree_skb(skb);
        return ret;
}

static struct sk_buff *choke_dequeue(struct Qdisc *sch)
{
        struct choke_sched_data *q = qdisc_priv(sch);
        struct sk_buff *skb;

        if (q->head == q->tail) {
                if (!red_is_idling(&q->vars))
                        red_start_of_idle_period(&q->vars);
                return NULL;
        }

        skb = q->tab[q->head];
        q->tab[q->head] = NULL;
        choke_zap_head_holes(q);
        --sch->q.qlen;
        sch->qstats.backlog -= qdisc_pkt_len(skb);
        qdisc_bstats_update(sch, skb);

        return skb;
}

static unsigned int choke_drop(struct Qdisc *sch)
{
        struct choke_sched_data *q = qdisc_priv(sch);
        unsigned int len;

        len = qdisc_queue_drop(sch);
        if (len > 0)
                q->stats.other++;
        else {
                if (!red_is_idling(&q->vars))
                        red_start_of_idle_period(&q->vars);
        }

        return len;
}

static void choke_reset(struct Qdisc *sch)
{
        struct choke_sched_data *q = qdisc_priv(sch);

        red_restart(&q->vars);
}

static const struct nla_policy choke_policy[TCA_CHOKE_MAX + 1] = {
        [TCA_CHOKE_PARMS]       = { .len = sizeof(struct tc_red_qopt) },
        [TCA_CHOKE_STAB]        = { .len = RED_STAB_SIZE },
        [TCA_CHOKE_MAX_P]       = { .type = NLA_U32 },
};

static void choke_free(void *addr)
{
        if (addr) {
                if (is_vmalloc_addr(addr))
                        vfree(addr);
                else
                        kfree(addr);
        }
}
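
/*
 * choke_change() resizes the packet table when the configured limit
 * changes: surviving packets are copied hole-free into the new ring under
 * the qdisc tree lock, and anything that no longer fits is dropped.
 */
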
static int choke_change(struct Qdisc *sch, struct nlattr *opt)
{
        struct choke_sched_data *q = qdisc_priv(sch);
        struct nlattr *tb[TCA_CHOKE_MAX + 1];
        const struct tc_red_qopt *ctl;
        int err;
        struct sk_buff **old = NULL;
        unsigned int mask;
        u32 max_P;

        if (opt == NULL)
                return -EINVAL;

        err = nla_parse_nested(tb, TCA_CHOKE_MAX, opt, choke_policy);
        if (err < 0)
                return err;

        if (tb[TCA_CHOKE_PARMS] == NULL ||
            tb[TCA_CHOKE_STAB] == NULL)
                return -EINVAL;

        max_P = tb[TCA_CHOKE_MAX_P] ? nla_get_u32(tb[TCA_CHOKE_MAX_P]) : 0;

        ctl = nla_data(tb[TCA_CHOKE_PARMS]);

        if (ctl->limit > CHOKE_MAX_QUEUE)
                return -EINVAL;

        mask = roundup_pow_of_two(ctl->limit + 1) - 1;
        if (mask != q->tab_mask) {
                struct sk_buff **ntab;

                ntab = kcalloc(mask + 1, sizeof(struct sk_buff *), GFP_KERNEL);
                if (!ntab)
                        ntab = vzalloc((mask + 1) * sizeof(struct sk_buff *));
                if (!ntab)
                        return -ENOMEM;

                sch_tree_lock(sch);
                old = q->tab;
                if (old) {
                        unsigned int oqlen = sch->q.qlen, tail = 0;

                        while (q->head != q->tail) {
                                struct sk_buff *skb = q->tab[q->head];

                                q->head = (q->head + 1) & q->tab_mask;
                                if (!skb)
                                        continue;
                                if (tail < mask) {
                                        ntab[tail++] = skb;
                                        continue;
                                }
                                sch->qstats.backlog -= qdisc_pkt_len(skb);
                                --sch->q.qlen;
                                qdisc_drop(skb, sch);
                        }
                        qdisc_tree_decrease_qlen(sch, oqlen - sch->q.qlen);
                        q->head = 0;
                        q->tail = tail;
                }

                q->tab_mask = mask;
                q->tab = ntab;
        } else
                sch_tree_lock(sch);

        q->flags = ctl->flags;
        q->limit = ctl->limit;

        red_set_parms(&q->parms, ctl->qth_min, ctl->qth_max, ctl->Wlog,
                      ctl->Plog, ctl->Scell_log,
                      nla_data(tb[TCA_CHOKE_STAB]),
                      max_P);
        red_set_vars(&q->vars);

        if (q->head == q->tail)
                red_end_of_idle_period(&q->vars);

        sch_tree_unlock(sch);
        choke_free(old);
        return 0;
}

static int choke_init(struct Qdisc *sch, struct nlattr *opt)
{
        return choke_change(sch, opt);
}

static int choke_dump(struct Qdisc *sch, struct sk_buff *skb)
{
        struct choke_sched_data *q = qdisc_priv(sch);
        struct nlattr *opts = NULL;
        struct tc_red_qopt opt = {
                .limit          = q->limit,
                .flags          = q->flags,
                .qth_min        = q->parms.qth_min >> q->parms.Wlog,
                .qth_max        = q->parms.qth_max >> q->parms.Wlog,
                .Wlog           = q->parms.Wlog,
                .Plog           = q->parms.Plog,
                .Scell_log      = q->parms.Scell_log,
        };

        opts = nla_nest_start(skb, TCA_OPTIONS);
        if (opts == NULL)
                goto nla_put_failure;

        NLA_PUT(skb, TCA_CHOKE_PARMS, sizeof(opt), &opt);
        NLA_PUT_U32(skb, TCA_CHOKE_MAX_P, q->parms.max_P);
        return nla_nest_end(skb, opts);

nla_put_failure:
        nla_nest_cancel(skb, opts);
        return -EMSGSIZE;
}

static int choke_dump_stats(struct Qdisc *sch, struct gnet_dump *d)
{
        struct choke_sched_data *q = qdisc_priv(sch);
        struct tc_choke_xstats st = {
                .early  = q->stats.prob_drop + q->stats.forced_drop,
                .marked = q->stats.prob_mark + q->stats.forced_mark,
                .pdrop  = q->stats.pdrop,
                .other  = q->stats.other,
                .matched = q->stats.matched,
        };

        return gnet_stats_copy_app(d, &st, sizeof(st));
}

static void choke_destroy(struct Qdisc *sch)
{
        struct choke_sched_data *q = qdisc_priv(sch);

        tcf_destroy_chain(&q->filter_list);
        choke_free(q->tab);
}

static struct Qdisc *choke_leaf(struct Qdisc *sch, unsigned long arg)
{
        return NULL;
}

static unsigned long choke_get(struct Qdisc *sch, u32 classid)
{
        return 0;
}

static void choke_put(struct Qdisc *q, unsigned long cl)
{
}

static unsigned long choke_bind(struct Qdisc *sch, unsigned long parent,
                                u32 classid)
{
        return 0;
}

static struct tcf_proto **choke_find_tcf(struct Qdisc *sch, unsigned long cl)
{
        struct choke_sched_data *q = qdisc_priv(sch);

        if (cl)
                return NULL;
        return &q->filter_list;
}

static int choke_dump_class(struct Qdisc *sch, unsigned long cl,
                            struct sk_buff *skb, struct tcmsg *tcm)
{
        tcm->tcm_handle |= TC_H_MIN(cl);
        return 0;
}

static void choke_walk(struct Qdisc *sch, struct qdisc_walker *arg)
{
        if (!arg->stop) {
                if (arg->fn(sch, 1, arg) < 0) {
                        arg->stop = 1;
                        return;
                }
                arg->count++;
        }
}

static const struct Qdisc_class_ops choke_class_ops = {
        .leaf           =       choke_leaf,
        .get            =       choke_get,
        .put            =       choke_put,
        .tcf_chain      =       choke_find_tcf,
        .bind_tcf       =       choke_bind,
        .unbind_tcf     =       choke_put,
        .dump           =       choke_dump_class,
        .walk           =       choke_walk,
};

static struct sk_buff *choke_peek_head(struct Qdisc *sch)
{
        struct choke_sched_data *q = qdisc_priv(sch);

        return (q->head != q->tail) ? q->tab[q->head] : NULL;
}

static struct Qdisc_ops choke_qdisc_ops __read_mostly = {
        .id             =       "choke",
        .priv_size      =       sizeof(struct choke_sched_data),

        .enqueue        =       choke_enqueue,
        .dequeue        =       choke_dequeue,
        .peek           =       choke_peek_head,
        .drop           =       choke_drop,
        .init           =       choke_init,
        .destroy        =       choke_destroy,
        .reset          =       choke_reset,
        .change         =       choke_change,
        .dump           =       choke_dump,
        .dump_stats     =       choke_dump_stats,
        .owner          =       THIS_MODULE,
};

static int __init choke_module_init(void)
{
        return register_qdisc(&choke_qdisc_ops);
}

static void __exit choke_module_exit(void)
{
        unregister_qdisc(&choke_qdisc_ops);
}

module_init(choke_module_init)
module_exit(choke_module_exit)

MODULE_LICENSE("GPL");