/*
 * net/sched/sch_drr.c		Deficit Round Robin scheduler
 *
 * Copyright (c) 2008 Patrick McHardy <kaber@trash.net>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * version 2 as published by the Free Software Foundation.
 */

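/*
 * The algorithm is described in M. Shreedhar and G. Varghese,
 * "Efficient Fair Queuing Using Deficit Round Robin", SIGCOMM '95.
 *
 * Illustrative userspace configuration (device name and classids are
 * examples, not taken from this file):
 *
 *	tc qdisc add dev eth0 root handle 1: drr
 *	tc class add dev eth0 parent 1: classid 1:1 drr quantum 1500
 *	tc class add dev eth0 parent 1: classid 1:2 drr
 *	tc filter add dev eth0 parent 1: protocol ip prio 1 \
 *		u32 match ip dport 80 0xffff flowid 1:1
 *
 * Packets not matched to any class are dropped by drr_enqueue(), so a
 * catch-all filter is normally configured as well.
 */
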
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/netdevice.h>
#include <linux/pkt_sched.h>
#include <net/sch_generic.h>
#include <net/pkt_sched.h>
#include <net/pkt_cls.h>

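/*
 * Each DRR class owns a leaf qdisc and carries the two DRR state
 * variables: the configured quantum (bytes credited per round) and the
 * current deficit (bytes the class may still send this round). Classes
 * with queued packets are linked on the scheduler's active list.
 */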
struct drr_class {
	struct Qdisc_class_common	common;
	unsigned int			refcnt;
	unsigned int			filter_cnt;

	struct gnet_stats_basic_packed	bstats;
	struct gnet_stats_queue		qstats;
	struct gnet_stats_rate_est64	rate_est;
	struct list_head		alist;
	struct Qdisc			*qdisc;

	u32				quantum;
	u32				deficit;
};

struct drr_sched {
	struct list_head		active;
	struct tcf_proto __rcu		*filter_list;
	struct Qdisc_class_hash		clhash;
};

static struct drr_class *drr_find_class(struct Qdisc *sch, u32 classid)
{
	struct drr_sched *q = qdisc_priv(sch);
	struct Qdisc_class_common *clc;

	clc = qdisc_class_find(&q->clhash, classid);
	if (clc == NULL)
		return NULL;
	return container_of(clc, struct drr_class, common);
}

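/*
 * Empty a class's leaf qdisc and propagate the qlen/backlog decrease
 * up the qdisc tree so the ancestors' counters stay consistent.
 */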
static void drr_purge_queue(struct drr_class *cl)
{
	unsigned int len = cl->qdisc->q.qlen;
	unsigned int backlog = cl->qdisc->qstats.backlog;

	qdisc_reset(cl->qdisc);
	qdisc_tree_reduce_backlog(cl->qdisc, len, backlog);
}

static const struct nla_policy drr_policy[TCA_DRR_MAX + 1] = {
	[TCA_DRR_QUANTUM]	= { .type = NLA_U32 },
};

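/*
 * Create a new class or, when *arg identifies an existing one, update
 * it. A missing TCA_DRR_QUANTUM defaults to the device MTU
 * (psched_mtu()), which guarantees every class can send at least one
 * full-sized packet per round.
 */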
static int drr_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
			    struct nlattr **tca, unsigned long *arg)
{
	struct drr_sched *q = qdisc_priv(sch);
	struct drr_class *cl = (struct drr_class *)*arg;
	struct nlattr *opt = tca[TCA_OPTIONS];
	struct nlattr *tb[TCA_DRR_MAX + 1];
	u32 quantum;
	int err;

	if (!opt)
		return -EINVAL;

	err = nla_parse_nested(tb, TCA_DRR_MAX, opt, drr_policy);
	if (err < 0)
		return err;

	if (tb[TCA_DRR_QUANTUM]) {
		quantum = nla_get_u32(tb[TCA_DRR_QUANTUM]);
		if (quantum == 0)
			return -EINVAL;
	} else
		quantum = psched_mtu(qdisc_dev(sch));

	if (cl != NULL) {
		if (tca[TCA_RATE]) {
			err = gen_replace_estimator(&cl->bstats, NULL,
						    &cl->rate_est,
						    NULL,
						    qdisc_root_sleeping_running(sch),
						    tca[TCA_RATE]);
			if (err)
				return err;
		}

		sch_tree_lock(sch);
		if (tb[TCA_DRR_QUANTUM])
			cl->quantum = quantum;
		sch_tree_unlock(sch);

		return 0;
	}

	cl = kzalloc(sizeof(struct drr_class), GFP_KERNEL);
	if (cl == NULL)
		return -ENOBUFS;

	cl->refcnt	   = 1;
	cl->common.classid = classid;
	cl->quantum	   = quantum;
	cl->qdisc	   = qdisc_create_dflt(sch->dev_queue,
					       &pfifo_qdisc_ops, classid);
	if (cl->qdisc == NULL)
		cl->qdisc = &noop_qdisc;

	if (tca[TCA_RATE]) {
		err = gen_replace_estimator(&cl->bstats, NULL, &cl->rate_est,
					    NULL,
					    qdisc_root_sleeping_running(sch),
					    tca[TCA_RATE]);
		if (err) {
			qdisc_destroy(cl->qdisc);
			kfree(cl);
			return err;
		}
	}

	sch_tree_lock(sch);
	qdisc_class_hash_insert(&q->clhash, &cl->common);
	sch_tree_unlock(sch);

	qdisc_class_hash_grow(sch, &q->clhash);

	*arg = (unsigned long)cl;
	return 0;
}

static void drr_destroy_class(struct Qdisc *sch, struct drr_class *cl)
{
	gen_kill_estimator(&cl->bstats, &cl->rate_est);
	qdisc_destroy(cl->qdisc);
	kfree(cl);
}

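/*
 * A class can only be removed once no filter results reference it; its
 * queued packets are purged and the backlog change is propagated to
 * the ancestor qdiscs before the class is unhashed.
 */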
static int drr_delete_class(struct Qdisc *sch, unsigned long arg)
{
	struct drr_sched *q = qdisc_priv(sch);
	struct drr_class *cl = (struct drr_class *)arg;

	if (cl->filter_cnt > 0)
		return -EBUSY;

	sch_tree_lock(sch);

	drr_purge_queue(cl);
	qdisc_class_hash_remove(&q->clhash, &cl->common);

	BUG_ON(--cl->refcnt == 0);
	/*
	 * This shouldn't happen: we "hold" one cops->get() when called
	 * from tc_ctl_tclass; the destroy method is done from cops->put().
	 */

	sch_tree_unlock(sch);
	return 0;
}

static unsigned long drr_get_class(struct Qdisc *sch, u32 classid)
{
	struct drr_class *cl = drr_find_class(sch, classid);

	if (cl != NULL)
		cl->refcnt++;

	return (unsigned long)cl;
}

static void drr_put_class(struct Qdisc *sch, unsigned long arg)
{
	struct drr_class *cl = (struct drr_class *)arg;

	if (--cl->refcnt == 0)
		drr_destroy_class(sch, cl);
}

static struct tcf_proto __rcu **drr_tcf_chain(struct Qdisc *sch,
					      unsigned long cl)
{
	struct drr_sched *q = qdisc_priv(sch);

	if (cl)
		return NULL;

	return &q->filter_list;
}

static unsigned long drr_bind_tcf(struct Qdisc *sch, unsigned long parent,
				  u32 classid)
{
	struct drr_class *cl = drr_find_class(sch, classid);

	if (cl != NULL)
		cl->filter_cnt++;

	return (unsigned long)cl;
}

static void drr_unbind_tcf(struct Qdisc *sch, unsigned long arg)
{
	struct drr_class *cl = (struct drr_class *)arg;

	cl->filter_cnt--;
}

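/*
 * Attach a new leaf qdisc to a class. Without an explicit replacement
 * a default pfifo is created, falling back to noop_qdisc (which drops
 * everything) if even that allocation fails.
 */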
static int drr_graft_class(struct Qdisc *sch, unsigned long arg,
			   struct Qdisc *new, struct Qdisc **old)
{
	struct drr_class *cl = (struct drr_class *)arg;

	if (new == NULL) {
		new = qdisc_create_dflt(sch->dev_queue,
					&pfifo_qdisc_ops, cl->common.classid);
		if (new == NULL)
			new = &noop_qdisc;
	}

	*old = qdisc_replace(sch, new, &cl->qdisc);
	return 0;
}

static struct Qdisc *drr_class_leaf(struct Qdisc *sch, unsigned long arg)
{
	struct drr_class *cl = (struct drr_class *)arg;

	return cl->qdisc;
}

static void drr_qlen_notify(struct Qdisc *sch, unsigned long arg)
{
	struct drr_class *cl = (struct drr_class *)arg;

	if (cl->qdisc->q.qlen == 0)
		list_del(&cl->alist);
}

static int drr_dump_class(struct Qdisc *sch, unsigned long arg,
			  struct sk_buff *skb, struct tcmsg *tcm)
{
	struct drr_class *cl = (struct drr_class *)arg;
	struct nlattr *nest;

	tcm->tcm_parent	= TC_H_ROOT;
	tcm->tcm_handle	= cl->common.classid;
	tcm->tcm_info	= cl->qdisc->handle;

	nest = nla_nest_start(skb, TCA_OPTIONS);
	if (nest == NULL)
		goto nla_put_failure;
	if (nla_put_u32(skb, TCA_DRR_QUANTUM, cl->quantum))
		goto nla_put_failure;
	return nla_nest_end(skb, nest);

nla_put_failure:
	nla_nest_cancel(skb, nest);
	return -EMSGSIZE;
}

static int drr_dump_class_stats(struct Qdisc *sch, unsigned long arg,
				struct gnet_dump *d)
{
	struct drr_class *cl = (struct drr_class *)arg;
	__u32 qlen = cl->qdisc->q.qlen;
	struct tc_drr_stats xstats;

	memset(&xstats, 0, sizeof(xstats));
	if (qlen)
		xstats.deficit = cl->deficit;

	if (gnet_stats_copy_basic(qdisc_root_sleeping_running(sch),
				  d, NULL, &cl->bstats) < 0 ||
	    gnet_stats_copy_rate_est(d, &cl->bstats, &cl->rate_est) < 0 ||
	    gnet_stats_copy_queue(d, NULL, &cl->qdisc->qstats, qlen) < 0)
		return -1;

	return gnet_stats_copy_app(d, &xstats, sizeof(xstats));
}

static void drr_walk(struct Qdisc *sch, struct qdisc_walker *arg)
{
	struct drr_sched *q = qdisc_priv(sch);
	struct drr_class *cl;
	unsigned int i;

	if (arg->stop)
		return;

	for (i = 0; i < q->clhash.hashsize; i++) {
		hlist_for_each_entry(cl, &q->clhash.hash[i], common.hnode) {
			if (arg->count < arg->skip) {
				arg->count++;
				continue;
			}
			if (arg->fn(sch, (unsigned long)cl, arg) < 0) {
				arg->stop = 1;
				return;
			}
			arg->count++;
		}
	}
}

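/*
 * Select the class for an skb: if skb->priority carries this qdisc's
 * major handle, it is tried directly as a classid; otherwise the
 * filter chain decides. Note the intentional fallthrough from
 * TC_ACT_QUEUED/TC_ACT_STOLEN to TC_ACT_SHOT below: all three cases
 * return NULL, differing only in the error reported via *qerr.
 */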
static struct drr_class *drr_classify(struct sk_buff *skb, struct Qdisc *sch,
				      int *qerr)
{
	struct drr_sched *q = qdisc_priv(sch);
	struct drr_class *cl;
	struct tcf_result res;
	struct tcf_proto *fl;
	int result;

	if (TC_H_MAJ(skb->priority ^ sch->handle) == 0) {
		cl = drr_find_class(sch, skb->priority);
		if (cl != NULL)
			return cl;
	}

	*qerr = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
	fl = rcu_dereference_bh(q->filter_list);
	result = tc_classify(skb, fl, &res, false);
	if (result >= 0) {
#ifdef CONFIG_NET_CLS_ACT
		switch (result) {
		case TC_ACT_QUEUED:
		case TC_ACT_STOLEN:
			*qerr = NET_XMIT_SUCCESS | __NET_XMIT_STOLEN;
		case TC_ACT_SHOT:
			return NULL;
		}
#endif
		cl = (struct drr_class *)res.class;
		if (cl == NULL)
			cl = drr_find_class(sch, res.classid);
		return cl;
	}
	return NULL;
}

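/*
 * Enqueue into the leaf qdisc of the selected class. A class whose
 * queue just became non-empty is appended to the active list with a
 * fresh deficit of one quantum.
 */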
static int drr_enqueue(struct sk_buff *skb, struct Qdisc *sch,
		       struct sk_buff **to_free)
{
	struct drr_sched *q = qdisc_priv(sch);
	struct drr_class *cl;
	int err = 0;

	cl = drr_classify(skb, sch, &err);
	if (cl == NULL) {
		if (err & __NET_XMIT_BYPASS)
			qdisc_qstats_drop(sch);
		__qdisc_drop(skb, to_free);
		return err;
	}

	err = qdisc_enqueue(skb, cl->qdisc, to_free);
	if (unlikely(err != NET_XMIT_SUCCESS)) {
		if (net_xmit_drop_count(err)) {
			cl->qstats.drops++;
			qdisc_qstats_drop(sch);
		}
		return err;
	}

	if (cl->qdisc->q.qlen == 1) {
		list_add_tail(&cl->alist, &q->active);
		cl->deficit = cl->quantum;
	}

	qdisc_qstats_backlog_inc(sch, skb);
	sch->q.qlen++;
	return err;
}

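/*
 * Deficit Round Robin dequeue: peek at the head packet of the first
 * active class. If its length fits within the class's deficit, the
 * packet is sent and the deficit reduced; otherwise the class receives
 * another quantum and moves to the tail of the active list. With a
 * quantum of at least the MTU (the default), the amortized work per
 * packet is O(1), independent of the number of active classes.
 */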
static struct sk_buff *drr_dequeue(struct Qdisc *sch)
{
	struct drr_sched *q = qdisc_priv(sch);
	struct drr_class *cl;
	struct sk_buff *skb;
	unsigned int len;

	if (list_empty(&q->active))
		goto out;
	while (1) {
		cl = list_first_entry(&q->active, struct drr_class, alist);
		skb = cl->qdisc->ops->peek(cl->qdisc);
		if (skb == NULL) {
			qdisc_warn_nonwc(__func__, cl->qdisc);
			goto out;
		}

		len = qdisc_pkt_len(skb);
		if (len <= cl->deficit) {
			cl->deficit -= len;
			skb = qdisc_dequeue_peeked(cl->qdisc);
			if (unlikely(skb == NULL))
				goto out;
			if (cl->qdisc->q.qlen == 0)
				list_del(&cl->alist);

			bstats_update(&cl->bstats, skb);
			qdisc_bstats_update(sch, skb);
			qdisc_qstats_backlog_dec(sch, skb);
			sch->q.qlen--;
			return skb;
		}

		cl->deficit += cl->quantum;
		list_move_tail(&cl->alist, &q->active);
	}
out:
	return NULL;
}

static int drr_init_qdisc(struct Qdisc *sch, struct nlattr *opt)
{
	struct drr_sched *q = qdisc_priv(sch);
	int err;

	err = qdisc_class_hash_init(&q->clhash);
	if (err < 0)
		return err;
	INIT_LIST_HEAD(&q->active);
	return 0;
}

static void drr_reset_qdisc(struct Qdisc *sch)
{
	struct drr_sched *q = qdisc_priv(sch);
	struct drr_class *cl;
	unsigned int i;

	for (i = 0; i < q->clhash.hashsize; i++) {
		hlist_for_each_entry(cl, &q->clhash.hash[i], common.hnode) {
			if (cl->qdisc->q.qlen)
				list_del(&cl->alist);
			qdisc_reset(cl->qdisc);
		}
	}
	sch->qstats.backlog = 0;
	sch->q.qlen = 0;
}

static void drr_destroy_qdisc(struct Qdisc *sch)
{
	struct drr_sched *q = qdisc_priv(sch);
	struct drr_class *cl;
	struct hlist_node *next;
	unsigned int i;

	tcf_destroy_chain(&q->filter_list);

	for (i = 0; i < q->clhash.hashsize; i++) {
		hlist_for_each_entry_safe(cl, next, &q->clhash.hash[i],
					  common.hnode)
			drr_destroy_class(sch, cl);
	}
	qdisc_class_hash_destroy(&q->clhash);
}

static const struct Qdisc_class_ops drr_class_ops = {
	.change		= drr_change_class,
	.delete		= drr_delete_class,
	.get		= drr_get_class,
	.put		= drr_put_class,
	.tcf_chain	= drr_tcf_chain,
	.bind_tcf	= drr_bind_tcf,
	.unbind_tcf	= drr_unbind_tcf,
	.graft		= drr_graft_class,
	.leaf		= drr_class_leaf,
	.qlen_notify	= drr_qlen_notify,
	.dump		= drr_dump_class,
	.dump_stats	= drr_dump_class_stats,
	.walk		= drr_walk,
};

static struct Qdisc_ops drr_qdisc_ops __read_mostly = {
	.cl_ops		= &drr_class_ops,
	.id		= "drr",
	.priv_size	= sizeof(struct drr_sched),
	.enqueue	= drr_enqueue,
	.dequeue	= drr_dequeue,
	.peek		= qdisc_peek_dequeued,
	.init		= drr_init_qdisc,
	.reset		= drr_reset_qdisc,
	.destroy	= drr_destroy_qdisc,
	.owner		= THIS_MODULE,
};

static int __init drr_init(void)
{
	return register_qdisc(&drr_qdisc_ops);
}

static void __exit drr_exit(void)
{
	unregister_qdisc(&drr_qdisc_ops);
}

module_init(drr_init);
module_exit(drr_exit);
MODULE_LICENSE("GPL");