/*
 * net/sched/sch_gred.c	Generic Random Early Detection queue.
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 * Authors:	J Hadi Salim (hadi@cyberus.ca) 1998-2002
 *
 *		991129: - Bug fix with grio mode
 *			- a better single AvgQ mode with Grio (WRED)
 *			- A finer grained VQ dequeue based on a suggestion
 *			  from Ren Liu
 *			- More error checks
 *
 * For all the glorious comments look at include/net/red.h
 */
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/skbuff.h>
#include <net/pkt_sched.h>
#include <net/red.h>
#define GRED_DEF_PRIO (MAX_DPs / 2)
#define GRED_VQ_MASK (MAX_DPs - 1)
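/*
 * The virtual queue (VQ) for a packet is chosen by the low bits of
 * skb->tc_index. This relies on MAX_DPs being a power of two, so that
 * GRED_VQ_MASK can extract the DP number with a simple AND.
 */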
struct gred_sched_data;
struct gred_sched;
struct gred_sched_data {
	u32		limit;		/* HARD maximal queue length */
	u32		DP;		/* the drop parameters */
	u32		bytesin;	/* bytes seen on virtualQ so far */
	u32		packetsin;	/* packets seen on virtualQ so far */
	u32		backlog;	/* bytes on the virtualQ */
	u8		prio;		/* the prio of this vq */

	struct red_parms parms;
	struct red_stats stats;
};
enum {
	GRED_WRED_MODE = 1,
	GRED_RIO_MODE,
};
struct gred_sched {
	struct gred_sched_data *tab[MAX_DPs];
	unsigned long	flags;
	u32		red_flags;
	u32		DPs;
	u32		def;
	struct red_parms wred_set;
};
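/*
 * One RED state is kept per virtual queue, indexed by its drop
 * parameter set (DP). Two optional modes are tracked in ->flags:
 * RIO mode groups VQs by priority, letting a VQ's drop decision also
 * account for the averages of more important (numerically lower prio)
 * VQs; WRED mode makes all VQs share the single average queue length
 * kept in ->wred_set.
 */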
static inline int gred_wred_mode(struct gred_sched *table)
{
	return test_bit(GRED_WRED_MODE, &table->flags);
}

static inline void gred_enable_wred_mode(struct gred_sched *table)
{
	__set_bit(GRED_WRED_MODE, &table->flags);
}

static inline void gred_disable_wred_mode(struct gred_sched *table)
{
	__clear_bit(GRED_WRED_MODE, &table->flags);
}

static inline int gred_rio_mode(struct gred_sched *table)
{
	return test_bit(GRED_RIO_MODE, &table->flags);
}

static inline void gred_enable_rio_mode(struct gred_sched *table)
{
	__set_bit(GRED_RIO_MODE, &table->flags);
}

static inline void gred_disable_rio_mode(struct gred_sched *table)
{
	__clear_bit(GRED_RIO_MODE, &table->flags);
}
static inline int gred_wred_mode_check(struct Qdisc *sch)
{
	struct gred_sched *table = qdisc_priv(sch);
	int i;

	/* Really ugly O(n^2), but it should not be needed frequently. */
	for (i = 0; i < table->DPs; i++) {
		struct gred_sched_data *q = table->tab[i];
		int n;

		if (q == NULL)
			continue;

		for (n = 0; n < table->DPs; n++)
			if (table->tab[n] && table->tab[n] != q &&
			    table->tab[n]->prio == q->prio)
				return 1;
	}

	return 0;
}
static inline unsigned int gred_backlog(struct gred_sched *table,
					struct gred_sched_data *q,
					struct Qdisc *sch)
{
	if (gred_wred_mode(table))
		return sch->qstats.backlog;
	else
		return q->backlog;
}
static inline u16 tc_index_to_dp(struct sk_buff *skb)
{
	return skb->tc_index & GRED_VQ_MASK;
}
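/*
 * In WRED mode the table-wide average in ->wred_set is authoritative:
 * it is loaded into the VQ before the RED calculation and stored back
 * afterwards, so every VQ sees the same qavg.
 */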
static inline void gred_load_wred_set(struct gred_sched *table,
				      struct gred_sched_data *q)
{
	q->parms.qavg = table->wred_set.qavg;
	q->parms.qidlestart = table->wred_set.qidlestart;
}

static inline void gred_store_wred_set(struct gred_sched *table,
				       struct gred_sched_data *q)
{
	table->wred_set.qavg = q->parms.qavg;
}
static inline int gred_use_ecn(struct gred_sched *t)
{
	return t->red_flags & TC_RED_ECN;
}

static inline int gred_use_harddrop(struct gred_sched *t)
{
	return t->red_flags & TC_RED_HARDDROP;
}
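/*
 * Enqueue: map the packet to a VQ (falling back to the default DP if
 * necessary), update that VQ's average queue length, and let
 * red_action() decide whether to accept, probabilistically mark/drop,
 * or forcibly mark/drop the packet.
 */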
static int gred_enqueue(struct sk_buff *skb, struct Qdisc *sch)
{
	struct gred_sched_data *q = NULL;
	struct gred_sched *t = qdisc_priv(sch);
	unsigned long qavg = 0;
	u16 dp = tc_index_to_dp(skb);

	if (dp >= t->DPs || (q = t->tab[dp]) == NULL) {
		dp = t->def;

		q = t->tab[dp];
		if (!q) {
			/* Pass through packets not assigned to a DP
			 * if no default DP has been configured. This
			 * allows for DP flows to be left untouched.
			 */
			if (skb_queue_len(&sch->q) < qdisc_dev(sch)->tx_queue_len)
				return qdisc_enqueue_tail(skb, sch);
			else
				goto drop;
		}

		/* fix tc_index? --could be controversial but needed for
		   requeueing */
		skb->tc_index = (skb->tc_index & ~GRED_VQ_MASK) | dp;
	}

	/* sum up the qavgs of all VQs with a prio below ours to get the
	 * new qavg */
	if (!gred_wred_mode(t) && gred_rio_mode(t)) {
		int i;

		for (i = 0; i < t->DPs; i++) {
			if (t->tab[i] && t->tab[i]->prio < q->prio &&
			    !red_is_idling(&t->tab[i]->parms))
				qavg += t->tab[i]->parms.qavg;
		}
	}

	q->packetsin++;
	q->bytesin += qdisc_pkt_len(skb);

	if (gred_wred_mode(t))
		gred_load_wred_set(t, q);

	q->parms.qavg = red_calc_qavg(&q->parms, gred_backlog(t, q, sch));

	if (red_is_idling(&q->parms))
		red_end_of_idle_period(&q->parms);

	if (gred_wred_mode(t))
		gred_store_wred_set(t, q);

	switch (red_action(&q->parms, q->parms.qavg + qavg)) {
	case RED_DONT_MARK:
		break;

	case RED_PROB_MARK:
		sch->qstats.overlimits++;
		if (!gred_use_ecn(t) || !INET_ECN_set_ce(skb)) {
			q->stats.prob_drop++;
			goto congestion_drop;
		}

		q->stats.prob_mark++;
		break;

	case RED_HARD_MARK:
		sch->qstats.overlimits++;
		if (gred_use_harddrop(t) || !gred_use_ecn(t) ||
		    !INET_ECN_set_ce(skb)) {
			q->stats.forced_drop++;
			goto congestion_drop;
		}

		q->stats.forced_mark++;
		break;
	}

	if (q->backlog + qdisc_pkt_len(skb) <= q->limit) {
		q->backlog += qdisc_pkt_len(skb);
		return qdisc_enqueue_tail(skb, sch);
	}

	q->stats.pdrop++;
drop:
	return qdisc_drop(skb, sch);

congestion_drop:
	qdisc_drop(skb, sch);
	return NET_XMIT_CN;
}
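/*
 * Packets are dequeued from the single shared queue; the per-VQ
 * backlog is charged back to the VQ recorded in the packet's tc_index.
 * A VQ that drains to empty starts a RED idle period, except in WRED
 * mode where idling is tracked table-wide below.
 */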
static struct sk_buff *gred_dequeue(struct Qdisc *sch)
{
	struct sk_buff *skb;
	struct gred_sched *t = qdisc_priv(sch);

	skb = qdisc_dequeue_head(sch);

	if (skb) {
		struct gred_sched_data *q;
		u16 dp = tc_index_to_dp(skb);

		if (dp >= t->DPs || (q = t->tab[dp]) == NULL) {
			if (net_ratelimit())
				pr_warning("GRED: Unable to relocate VQ 0x%x "
					   "after dequeue, screwing up "
					   "backlog.\n", tc_index_to_dp(skb));
		} else {
			q->backlog -= qdisc_pkt_len(skb);

			if (!q->backlog && !gred_wred_mode(t))
				red_start_of_idle_period(&q->parms);
		}

		return skb;
	}

	if (gred_wred_mode(t) && !red_is_idling(&t->wred_set))
		red_start_of_idle_period(&t->wred_set);

	return NULL;
}
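/*
 * ->drop removes the most recently queued packet (tail drop) and books
 * the loss against the owning VQ as an "other" drop.
 */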
static unsigned int gred_drop(struct Qdisc *sch)
{
	struct sk_buff *skb;
	struct gred_sched *t = qdisc_priv(sch);

	skb = qdisc_dequeue_tail(sch);
	if (skb) {
		unsigned int len = qdisc_pkt_len(skb);
		struct gred_sched_data *q;
		u16 dp = tc_index_to_dp(skb);

		if (dp >= t->DPs || (q = t->tab[dp]) == NULL) {
			if (net_ratelimit())
				pr_warning("GRED: Unable to relocate VQ 0x%x "
					   "while dropping, screwing up "
					   "backlog.\n", tc_index_to_dp(skb));
		} else {
			q->backlog -= len;
			q->stats.other++;

			if (!q->backlog && !gred_wred_mode(t))
				red_start_of_idle_period(&q->parms);
		}

		qdisc_drop(skb, sch);
		return len;
	}

	if (gred_wred_mode(t) && !red_is_idling(&t->wred_set))
		red_start_of_idle_period(&t->wred_set);

	return 0;
}
static void gred_reset(struct Qdisc *sch)
{
	int i;
	struct gred_sched *t = qdisc_priv(sch);

	qdisc_reset_queue(sch);

	for (i = 0; i < t->DPs; i++) {
		struct gred_sched_data *q = t->tab[i];

		if (!q)
			continue;

		red_restart(&q->parms);
		q->backlog = 0;
	}
}
static inline void gred_destroy_vq(struct gred_sched_data *q)
{
	kfree(q);
}
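/*
 * Table-level (TCA_GRED_DPS) changes: resize the DP table, select the
 * default DP and recompute the rio/wred mode bits. VQs beyond the new
 * DP count become unreachable ("shadowed") and are destroyed.
 */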
static inline int gred_change_table_def(struct Qdisc *sch, struct nlattr *dps)
{
	struct gred_sched *table = qdisc_priv(sch);
	struct tc_gred_sopt *sopt;
	int i;

	if (dps == NULL)
		return -EINVAL;

	sopt = nla_data(dps);

	if (sopt->DPs > MAX_DPs || sopt->DPs == 0 || sopt->def_DP >= sopt->DPs)
		return -EINVAL;

	sch_tree_lock(sch);
	table->DPs = sopt->DPs;
	table->def = sopt->def_DP;
	table->red_flags = sopt->flags;

	/*
	 * Every entry point to GRED is synchronized with the above code
	 * and the DP is checked against DPs, i.e. shadowed VQs can no
	 * longer be found so we can unlock right here.
	 */
	sch_tree_unlock(sch);

	if (sopt->grio) {
		gred_enable_rio_mode(table);
		gred_disable_wred_mode(table);
		if (gred_wred_mode_check(sch))
			gred_enable_wred_mode(table);
	} else {
		gred_disable_rio_mode(table);
		gred_disable_wred_mode(table);
	}

	for (i = table->DPs; i < MAX_DPs; i++) {
		if (table->tab[i]) {
			pr_warning("GRED: Warning: Destroying "
				   "shadowed VQ 0x%x\n", i);
			gred_destroy_vq(table->tab[i]);
			table->tab[i] = NULL;
		}
	}

	return 0;
}
static inline int gred_change_vq(struct Qdisc *sch, int dp,
				 struct tc_gred_qopt *ctl, int prio, u8 *stab)
{
	struct gred_sched *table = qdisc_priv(sch);
	struct gred_sched_data *q;

	if (table->tab[dp] == NULL) {
		table->tab[dp] = kzalloc(sizeof(*q), GFP_ATOMIC);
		if (table->tab[dp] == NULL)
			return -ENOMEM;
	}

	q = table->tab[dp];
	q->DP = dp;
	q->prio = prio;
	q->limit = ctl->limit;

	if (q->backlog == 0)
		red_end_of_idle_period(&q->parms);

	red_set_parms(&q->parms,
		      ctl->qth_min, ctl->qth_max, ctl->Wlog, ctl->Plog,
		      ctl->Scell_log, stab);

	return 0;
}
static const struct nla_policy gred_policy[TCA_GRED_MAX + 1] = {
	[TCA_GRED_PARMS]	= { .len = sizeof(struct tc_gred_qopt) },
	[TCA_GRED_STAB]		= { .len = 256 },
	[TCA_GRED_DPS]		= { .len = sizeof(struct tc_gred_sopt) },
};
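/*
 * Per-VQ configuration: TCA_GRED_PARMS and TCA_GRED_STAB must arrive
 * together; a message carrying neither is treated as a table-level
 * change instead. In rio mode, a VQ configured without a prio inherits
 * the default DP's priority.
 */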
static int gred_change(struct Qdisc *sch, struct nlattr *opt)
{
	struct gred_sched *table = qdisc_priv(sch);
	struct tc_gred_qopt *ctl;
	struct nlattr *tb[TCA_GRED_MAX + 1];
	int err, prio = GRED_DEF_PRIO;
	u8 *stab;

	if (opt == NULL)
		return -EINVAL;

	err = nla_parse_nested(tb, TCA_GRED_MAX, opt, gred_policy);
	if (err < 0)
		return err;

	if (tb[TCA_GRED_PARMS] == NULL && tb[TCA_GRED_STAB] == NULL) {
		/* Table-level change: hand the TCA_GRED_DPS attribute
		 * itself (as gred_init does), not the whole options
		 * nest, to gred_change_table_def().
		 */
		return gred_change_table_def(sch, tb[TCA_GRED_DPS]);
	}

	if (tb[TCA_GRED_PARMS] == NULL ||
	    tb[TCA_GRED_STAB] == NULL)
		return -EINVAL;

	err = -EINVAL;
	ctl = nla_data(tb[TCA_GRED_PARMS]);
	stab = nla_data(tb[TCA_GRED_STAB]);

	if (ctl->DP >= table->DPs)
		goto errout;

	if (gred_rio_mode(table)) {
		if (ctl->prio == 0) {
			int def_prio = GRED_DEF_PRIO;

			if (table->tab[table->def])
				def_prio = table->tab[table->def]->prio;

			printk(KERN_DEBUG "GRED: DP %u does not have a prio, "
			       "setting default to %d\n", ctl->DP, def_prio);

			prio = def_prio;
		} else
			prio = ctl->prio;
	}

	sch_tree_lock(sch);

	err = gred_change_vq(sch, ctl->DP, ctl, prio, stab);
	if (err < 0)
		goto errout_locked;

	if (gred_rio_mode(table)) {
		gred_disable_wred_mode(table);
		if (gred_wred_mode_check(sch))
			gred_enable_wred_mode(table);
	}

	err = 0;

errout_locked:
	sch_tree_unlock(sch);
errout:
	return err;
}
static int gred_init(struct Qdisc *sch, struct nlattr *opt)
{
	struct nlattr *tb[TCA_GRED_MAX + 1];
	int err;

	if (opt == NULL)
		return -EINVAL;

	err = nla_parse_nested(tb, TCA_GRED_MAX, opt, gred_policy);
	if (err < 0)
		return err;

	if (tb[TCA_GRED_PARMS] || tb[TCA_GRED_STAB])
		return -EINVAL;

	return gred_change_table_def(sch, tb[TCA_GRED_DPS]);
}
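/*
 * The dump always reports MAX_DPs entries; slots without a VQ are
 * marked by a DP value of MAX_DPs + i so that tc can tell them apart
 * from configured ones.
 */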
static int gred_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	struct gred_sched *table = qdisc_priv(sch);
	struct nlattr *parms, *opts = NULL;
	int i;
	struct tc_gred_sopt sopt = {
		.DPs	= table->DPs,
		.def_DP	= table->def,
		.grio	= gred_rio_mode(table),
		.flags	= table->red_flags,
	};

	opts = nla_nest_start(skb, TCA_OPTIONS);
	if (opts == NULL)
		goto nla_put_failure;
	NLA_PUT(skb, TCA_GRED_DPS, sizeof(sopt), &sopt);
	parms = nla_nest_start(skb, TCA_GRED_PARMS);
	if (parms == NULL)
		goto nla_put_failure;

	for (i = 0; i < MAX_DPs; i++) {
		struct gred_sched_data *q = table->tab[i];
		struct tc_gred_qopt opt;

		memset(&opt, 0, sizeof(opt));

		if (!q) {
			/* hack -- fix at some point with proper message.
			 * This is how we indicate to tc that there is no
			 * VQ at this DP.
			 */
			opt.DP = MAX_DPs + i;
			goto append_opt;
		}

		opt.limit	= q->limit;
		opt.DP		= q->DP;
		opt.backlog	= q->backlog;
		opt.prio	= q->prio;
		opt.qth_min	= q->parms.qth_min >> q->parms.Wlog;
		opt.qth_max	= q->parms.qth_max >> q->parms.Wlog;
		opt.Wlog	= q->parms.Wlog;
		opt.Plog	= q->parms.Plog;
		opt.Scell_log	= q->parms.Scell_log;
		opt.other	= q->stats.other;
		opt.early	= q->stats.prob_drop;
		opt.forced	= q->stats.forced_drop;
		opt.pdrop	= q->stats.pdrop;
		opt.packets	= q->packetsin;
		opt.bytesin	= q->bytesin;

		if (gred_wred_mode(table))
			gred_load_wred_set(table, q);

		opt.qave = red_calc_qavg(&q->parms, q->parms.qavg);

append_opt:
		if (nla_append(skb, sizeof(opt), &opt) < 0)
			goto nla_put_failure;
	}

	nla_nest_end(skb, parms);

	return nla_nest_end(skb, opts);

nla_put_failure:
	nla_nest_cancel(skb, opts);
	return -EMSGSIZE;
}
static void gred_destroy(struct Qdisc *sch)
{
	struct gred_sched *table = qdisc_priv(sch);
	int i;

	for (i = 0; i < table->DPs; i++) {
		if (table->tab[i])
			gred_destroy_vq(table->tab[i]);
	}
}
static struct Qdisc_ops gred_qdisc_ops __read_mostly = {
	.id		=	"gred",
	.priv_size	=	sizeof(struct gred_sched),
	.enqueue	=	gred_enqueue,
	.dequeue	=	gred_dequeue,
	.peek		=	qdisc_peek_head,
	.drop		=	gred_drop,
	.init		=	gred_init,
	.reset		=	gred_reset,
	.destroy	=	gred_destroy,
	.change		=	gred_change,
	.dump		=	gred_dump,
	.owner		=	THIS_MODULE,
};
static int __init gred_module_init(void)
{
	return register_qdisc(&gred_qdisc_ops);
}

static void __exit gred_module_exit(void)
{
	unregister_qdisc(&gred_qdisc_ops);
}

module_init(gred_module_init)
module_exit(gred_module_exit)

MODULE_LICENSE("GPL");