/*
 * net/sched/sch_htb.c	Hierarchical token bucket, feed tree version
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 * Authors:	Martin Devera, <devik@cdi.cz>
 *
 * Credits (in time order) for older HTB versions:
 *		Stef Coene <stef.coene@docum.org>
 *			HTB support at LARTC mailing list
 *		Ondrej Kraus, <krauso@barr.cz>
 *			found missing INIT_QDISC(htb)
 *		Vladimir Smelhaus, Aamer Akhter, Bert Hubert
 *			helped a lot to locate nasty class stall bug
 *		Andi Kleen, Jamal Hadi, Bert Hubert
 *			code review and helpful comments on shaping
 *		Tomasz Wrona, <tw@eter.tym.pl>
 *			created test case so that I was able to fix nasty bug
 *		Wilfried Weissmann
 *			spotted bug in dequeue code and helped with fix
 *		Jiri Fojtasek
 *			fixed requeue routine
 *		and many others. thanks.
 */
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/skbuff.h>
#include <linux/list.h>
#include <linux/compiler.h>
#include <linux/rbtree.h>
#include <linux/workqueue.h>
#include <linux/slab.h>
#include <net/netlink.h>
#include <net/sch_generic.h>
#include <net/pkt_sched.h>
/* HTB algorithm.
    Author: devik@cdi.cz
    ========================================================================
    HTB is like TBF with multiple classes. It is also similar to CBQ because
    it allows assigning a priority to each class in the hierarchy.
    In fact it is another implementation of Floyd's formal sharing.

    Levels:
    Each class is assigned a level. A leaf has ALWAYS level 0 and root
    classes have level TC_HTB_MAXDEPTH-1. Interior nodes have a level
    one less than their parent.
*/
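
/* Illustrative example (not from the original source): in a hierarchy
    built as 1:1 (root) -> 1:10 (inner) -> 1:100 (leaf), the leaf 1:100
    always has level 0, the root 1:1 has level TC_HTB_MAXDEPTH-1, and the
    inner class 1:10 sits one level below its parent. Only leaves carry
    packets; inner classes exist to lend and borrow tokens.
*/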
static int htb_hysteresis __read_mostly = 0; /* whether to use mode hysteresis for speedup */
#define HTB_VER 0x30011		/* major must be matched with number supplied by TC as version */

#if HTB_VER >> 16 != TC_HTB_PROTOVER
#error "Mismatched sch_htb.c and pkt_sch.h"
#endif

/* Module parameter and sysfs export */
module_param(htb_hysteresis, int, 0640);
MODULE_PARM_DESC(htb_hysteresis, "Hysteresis mode, less CPU load, less accurate");

static int htb_rate_est = 0; /* htb classes have a default rate estimator */
module_param(htb_rate_est, int, 0640);
MODULE_PARM_DESC(htb_rate_est, "setup a default rate estimator (4sec 16sec) for htb classes");
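
/* Usage sketch (illustrative, not part of the original source): both
 * knobs are plain module parameters, so they can be set at load time or
 * changed later through sysfs (mode 0640):
 *   modprobe sch_htb htb_hysteresis=1
 *   echo 1 > /sys/module/sch_htb/parameters/htb_rate_est
 */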
/* used internally to keep status of single class */
enum htb_cmode {
	HTB_CANT_SEND,		/* class can't send and can't borrow */
	HTB_MAY_BORROW,		/* class can't send but may borrow */
	HTB_CAN_SEND		/* class can send */
};

struct htb_prio {
	union {
		struct rb_root	row;
		struct rb_root	feed;
	};
	struct rb_node	*ptr;
	/* When class changes from state 1->2 and disconnects from
	 * parent's feed then we lose the ptr value and start from the
	 * first child again. Here we store classid of the
	 * last valid ptr (used when ptr is NULL).
	 */
	u32		last_ptr_id;
};
/* interior & leaf nodes; props specific to leaves are marked L:
 * To reduce false sharing, place mostly read fields at beginning,
 * and mostly written ones at the end.
 */
struct htb_class {
	struct Qdisc_class_common common;
	struct psched_ratecfg	rate;
	struct psched_ratecfg	ceil;
	s64			buffer, cbuffer;/* token bucket depth/rate */
	s64			mbuffer;	/* max wait time */
	u32			prio;		/* these two are used only by leaves... */
	int			quantum;	/* but stored for parent-to-leaf return */

	struct tcf_proto __rcu	*filter_list;	/* class attached filters */
	int			filter_cnt;
	int			refcnt;		/* usage count of this class */

	int			level;		/* our level (see above) */
	unsigned int		children;
	struct htb_class	*parent;	/* parent class */

	struct gnet_stats_rate_est64 rate_est;

	/*
	 * Written often fields
	 */
	struct gnet_stats_basic_packed bstats;
	struct tc_htb_xstats	xstats;	/* our special stats */

	/* token bucket parameters */
	s64			tokens, ctokens;/* current number of tokens */
	s64			t_c;		/* checkpoint time */

	union {
		struct htb_class_leaf {
			struct list_head drop_list;
			int		deficit[TC_HTB_MAXDEPTH];
			struct Qdisc	*q;
		} leaf;
		struct htb_class_inner {
			struct htb_prio clprio[TC_HTB_NUMPRIO];
		} inner;
	} un;
	s64			pq_key;

	int			prio_activity;	/* for which prios are we active */
	enum htb_cmode		cmode;		/* current mode of the class */
	struct rb_node		pq_node;	/* node for event queue */
	struct rb_node		node[TC_HTB_NUMPRIO];	/* node for self or feed tree */

	unsigned int drops ____cacheline_aligned_in_smp;
};

struct htb_level {
	struct rb_root	wait_pq;
	struct htb_prio hprio[TC_HTB_NUMPRIO];
};
struct htb_sched {
	struct Qdisc_class_hash clhash;
	int			defcls;		/* class where unclassified flows go to */
	int			rate2quantum;	/* quant = rate / rate2quantum */

	/* filters for qdisc itself */
	struct tcf_proto __rcu	*filter_list;

#define HTB_WARN_TOOMANYEVENTS	0x1
	unsigned int		warned;	/* only one warning */
	int			direct_qlen;
	struct work_struct	work;

	/* non shaped skbs; let them go directly thru */
	struct qdisc_skb_head	direct_queue;
	long			direct_pkts;

	struct qdisc_watchdog	watchdog;

	s64			now;	/* cached dequeue time */
	struct list_head	drops[TC_HTB_NUMPRIO];/* active leaves (for drops) */

	/* time of nearest event per level (row) */
	s64			near_ev_cache[TC_HTB_MAXDEPTH];

	int			row_mask[TC_HTB_MAXDEPTH];

	struct htb_level	hlevel[TC_HTB_MAXDEPTH];
};
/* find class in global hash table using given handle */
static inline struct htb_class *htb_find(u32 handle, struct Qdisc *sch)
{
	struct htb_sched *q = qdisc_priv(sch);
	struct Qdisc_class_common *clc;

	clc = qdisc_class_find(&q->clhash, handle);
	if (clc == NULL)
		return NULL;
	return container_of(clc, struct htb_class, common);
}
/**
 * htb_classify - classify a packet into class
 *
 * It returns NULL if the packet should be dropped or -1 if the packet
 * should be passed directly thru. In all other cases leaf class is returned.
 * We allow direct class selection by classid in priority. Then we examine
 * filters in qdisc and in inner nodes (if higher filter points to the inner
 * node). If we end up with classid MAJOR:0 we enqueue the skb into special
 * internal fifo (direct). These packets then go directly thru. If we still
 * have no valid leaf we try to use MAJOR:default leaf. If that is still
 * unsuccessful then we finish and return the direct queue.
 */
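
/* Illustrative setup (not part of the original source): after
 *   tc qdisc add dev eth0 root handle 1: htb default 20
 *   tc filter add dev eth0 parent 1: protocol ip u32 \
 *        match ip dport 80 0xffff flowid 1:10
 * a packet matching the filter is classified to leaf 1:10, an skb with
 * skb->priority == 0x10000 (i.e. classid 1:0, the qdisc handle) takes
 * the direct queue, and everything else falls back to the default
 * leaf 1:20.
 */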
#define HTB_DIRECT ((struct htb_class *)-1L)

static struct htb_class *htb_classify(struct sk_buff *skb, struct Qdisc *sch,
				      int *qerr)
{
	struct htb_sched *q = qdisc_priv(sch);
	struct htb_class *cl;
	struct tcf_result res;
	struct tcf_proto *tcf;
	int result;

	/* allow to select class by setting skb->priority to valid classid;
	 * note that nfmark can be used too by attaching filter fw with no
	 * rules in it
	 */
	if (skb->priority == sch->handle)
		return HTB_DIRECT;	/* X:0 (direct flow) selected */
	cl = htb_find(skb->priority, sch);
	if (cl) {
		if (cl->level == 0)
			return cl;
		/* Start with inner filter chain if a non-leaf class is selected */
		tcf = rcu_dereference_bh(cl->filter_list);
	} else {
		tcf = rcu_dereference_bh(q->filter_list);
	}

	*qerr = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
	while (tcf && (result = tc_classify(skb, tcf, &res, false)) >= 0) {
#ifdef CONFIG_NET_CLS_ACT
		switch (result) {
		case TC_ACT_QUEUED:
		case TC_ACT_STOLEN:
			*qerr = NET_XMIT_SUCCESS | __NET_XMIT_STOLEN;
		case TC_ACT_SHOT:
			return NULL;
		}
#endif
		cl = (void *)res.class;
		if (!cl) {
			if (res.classid == sch->handle)
				return HTB_DIRECT;	/* X:0 (direct flow) */
			cl = htb_find(res.classid, sch);
			if (!cl)
				break;	/* filter selected invalid classid */
		}
		if (!cl->level)
			return cl;	/* we hit leaf; return it */

		/* we have got inner class; apply inner filter chain */
		tcf = rcu_dereference_bh(cl->filter_list);
	}
	/* classification failed; try to use default class */
	cl = htb_find(TC_H_MAKE(TC_H_MAJ(sch->handle), q->defcls), sch);
	if (!cl || cl->level)
		return HTB_DIRECT;	/* bad default .. this is safe bet */
	return cl;
}
/**
 * htb_add_to_id_tree - adds class to the round robin list
 *
 * Routine adds class to the list (actually tree) sorted by classid.
 * Make sure that class is not already on such list for given prio.
 */
static void htb_add_to_id_tree(struct rb_root *root,
			       struct htb_class *cl, int prio)
{
	struct rb_node **p = &root->rb_node, *parent = NULL;

	while (*p) {
		struct htb_class *c;
		parent = *p;
		c = rb_entry(parent, struct htb_class, node[prio]);

		if (cl->common.classid > c->common.classid)
			p = &parent->rb_right;
		else
			p = &parent->rb_left;
	}
	rb_link_node(&cl->node[prio], parent, p);
	rb_insert_color(&cl->node[prio], root);
}
/**
 * htb_add_to_wait_tree - adds class to the event queue with delay
 *
 * The class is added to priority event queue to indicate that class will
 * change its mode at time cl->pq_key (in nanoseconds, same clock as
 * q->now). Make sure that class is not already in the queue.
 */
static void htb_add_to_wait_tree(struct htb_sched *q,
				 struct htb_class *cl, s64 delay)
{
	struct rb_node **p = &q->hlevel[cl->level].wait_pq.rb_node, *parent = NULL;

	cl->pq_key = q->now + delay;
	if (cl->pq_key == q->now)
		cl->pq_key++;

	/* update the nearest event cache */
	if (q->near_ev_cache[cl->level] > cl->pq_key)
		q->near_ev_cache[cl->level] = cl->pq_key;

	while (*p) {
		struct htb_class *c;
		parent = *p;
		c = rb_entry(parent, struct htb_class, pq_node);
		if (cl->pq_key >= c->pq_key)
			p = &parent->rb_right;
		else
			p = &parent->rb_left;
	}
	rb_link_node(&cl->pq_node, parent, p);
	rb_insert_color(&cl->pq_node, &q->hlevel[cl->level].wait_pq);
}
/**
 * htb_next_rb_node - finds next node in binary tree
 *
 * When we are past last key we return NULL.
 * Average complexity is 2 steps per call.
 */
static inline void htb_next_rb_node(struct rb_node **n)
{
	*n = rb_next(*n);
}

/**
 * htb_add_class_to_row - add class to its row
 *
 * The class is added to row at priorities marked in mask.
 * It does nothing if mask == 0.
 */
static inline void htb_add_class_to_row(struct htb_sched *q,
					struct htb_class *cl, int mask)
{
	q->row_mask[cl->level] |= mask;
	while (mask) {
		int prio = ffz(~mask);
		mask &= ~(1 << prio);
		htb_add_to_id_tree(&q->hlevel[cl->level].hprio[prio].row, cl, prio);
	}
}

/* If this triggers, it is a bug in this code, but it need not be fatal */
static void htb_safe_rb_erase(struct rb_node *rb, struct rb_root *root)
{
	if (RB_EMPTY_NODE(rb)) {
		WARN_ON(1);
	} else {
		rb_erase(rb, root);
		RB_CLEAR_NODE(rb);
	}
}
/**
 * htb_remove_class_from_row - removes class from its row
 *
 * The class is removed from row at priorities marked in mask.
 * It does nothing if mask == 0.
 */
static inline void htb_remove_class_from_row(struct htb_sched *q,
					     struct htb_class *cl, int mask)
{
	int m = 0;
	struct htb_level *hlevel = &q->hlevel[cl->level];

	while (mask) {
		int prio = ffz(~mask);
		struct htb_prio *hprio = &hlevel->hprio[prio];

		mask &= ~(1 << prio);
		if (hprio->ptr == cl->node + prio)
			htb_next_rb_node(&hprio->ptr);

		htb_safe_rb_erase(cl->node + prio, &hprio->row);
		if (!hprio->row.rb_node)
			m |= 1 << prio;
	}
	q->row_mask[cl->level] &= ~m;
}
/**
 * htb_activate_prios - creates active class's feed chain
 *
 * The class is connected to ancestors and/or appropriate rows
 * for priorities it is participating on. cl->cmode must be new
 * (activated) mode. It does nothing if cl->prio_activity == 0.
 */
static void htb_activate_prios(struct htb_sched *q, struct htb_class *cl)
{
	struct htb_class *p = cl->parent;
	long m, mask = cl->prio_activity;

	while (cl->cmode == HTB_MAY_BORROW && p && mask) {
		m = mask;
		while (m) {
			int prio = ffz(~m);
			m &= ~(1 << prio);

			if (p->un.inner.clprio[prio].feed.rb_node)
				/* parent already has its feed in use, so
				 * reset the bit in mask as parent is already ok
				 */
				mask &= ~(1 << prio);

			htb_add_to_id_tree(&p->un.inner.clprio[prio].feed, cl, prio);
		}
		p->prio_activity |= mask;
		cl = p;
		p = cl->parent;
	}

	if (cl->cmode == HTB_CAN_SEND && mask)
		htb_add_class_to_row(q, cl, mask);
}
/**
 * htb_deactivate_prios - remove class from feed chain
 *
 * cl->cmode must represent old mode (before deactivation). It does
 * nothing if cl->prio_activity == 0. Class is removed from all feed
 * chains and rows.
 */
static void htb_deactivate_prios(struct htb_sched *q, struct htb_class *cl)
{
	struct htb_class *p = cl->parent;
	long m, mask = cl->prio_activity;

	while (cl->cmode == HTB_MAY_BORROW && p && mask) {
		m = mask;
		mask = 0;
		while (m) {
			int prio = ffz(~m);
			m &= ~(1 << prio);

			if (p->un.inner.clprio[prio].ptr == cl->node + prio) {
				/* we are removing child which is pointed to from
				 * parent feed - forget the pointer but remember
				 * classid
				 */
				p->un.inner.clprio[prio].last_ptr_id = cl->common.classid;
				p->un.inner.clprio[prio].ptr = NULL;
			}

			htb_safe_rb_erase(cl->node + prio,
					  &p->un.inner.clprio[prio].feed);

			if (!p->un.inner.clprio[prio].feed.rb_node)
				mask |= 1 << prio;
		}

		p->prio_activity &= ~mask;
		cl = p;
		p = cl->parent;
	}

	if (cl->cmode == HTB_CAN_SEND && mask)
		htb_remove_class_from_row(q, cl, mask);
}
static inline s64 htb_lowater(const struct htb_class *cl)
{
	if (htb_hysteresis)
		return cl->cmode != HTB_CANT_SEND ? -cl->cbuffer : 0;
	else
		return 0;
}

static inline s64 htb_hiwater(const struct htb_class *cl)
{
	if (htb_hysteresis)
		return cl->cmode == HTB_CAN_SEND ? -cl->buffer : 0;
	else
		return 0;
}
/**
 * htb_class_mode - computes and returns current class mode
 *
 * It computes cl's mode at time cl->t_c+diff and returns it. If mode
 * is not HTB_CAN_SEND then cl->pq_key is updated to time difference
 * from now to time when cl will change its state.
 * Also it is worth noting that class mode doesn't change simply
 * at cl->{c,}tokens == 0 but there can rather be hysteresis of
 * 0 .. -cl->{c,}buffer range. It is meant to limit number of
 * mode transitions per time unit. The speed gain is about 1/6.
 */
static inline enum htb_cmode
htb_class_mode(struct htb_class *cl, s64 *diff)
{
	s64 toks;

	if ((toks = (cl->ctokens + *diff)) < htb_lowater(cl)) {
		*diff = -toks;
		return HTB_CANT_SEND;
	}

	if ((toks = (cl->tokens + *diff)) >= htb_hiwater(cl))
		return HTB_CAN_SEND;

	*diff = -toks;
	return HTB_MAY_BORROW;
}
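
/* Worked example (illustrative): with hysteresis off both watermarks
 * are 0. Tokens are kept in nanoseconds, so if ctokens + *diff == -5000
 * the class is over its ceil: the result is HTB_CANT_SEND and *diff
 * becomes 5000, the time until the ceil bucket refills to zero. If the
 * ceil bucket is fine but tokens + *diff == -2000, the class may borrow
 * (HTB_MAY_BORROW) and *diff becomes 2000, the wait until it can send
 * at its own rate again.
 */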
/**
 * htb_change_class_mode - changes class's mode
 *
 * This should be the only way to change class's mode under normal
 * circumstances. Routine will update feed lists linkage, change mode
 * and add class to the wait event queue if appropriate. New mode should
 * be different from old one and cl->pq_key has to be valid if changing
 * to mode other than HTB_CAN_SEND (see htb_add_to_wait_tree).
 */
static void
htb_change_class_mode(struct htb_sched *q, struct htb_class *cl, s64 *diff)
{
	enum htb_cmode new_mode = htb_class_mode(cl, diff);

	if (new_mode == cl->cmode)
		return;

	if (cl->prio_activity) {	/* not necessary: speed optimization */
		if (cl->cmode != HTB_CANT_SEND)
			htb_deactivate_prios(q, cl);
		cl->cmode = new_mode;
		if (new_mode != HTB_CANT_SEND)
			htb_activate_prios(q, cl);
	} else
		cl->cmode = new_mode;
}
/**
 * htb_activate - inserts leaf cl into appropriate active feeds
 *
 * Routine learns (new) priority of leaf and activates feed chain
 * for the prio. It can be called on already active leaf safely.
 * It also adds leaf into droplist.
 */
static inline void htb_activate(struct htb_sched *q, struct htb_class *cl)
{
	WARN_ON(cl->level || !cl->un.leaf.q || !cl->un.leaf.q->q.qlen);

	if (!cl->prio_activity) {
		cl->prio_activity = 1 << cl->prio;
		htb_activate_prios(q, cl);
		list_add_tail(&cl->un.leaf.drop_list,
			      q->drops + cl->prio);
	}
}
/**
 * htb_deactivate - remove leaf cl from active feeds
 *
 * Make sure that leaf is active. In other words it can't be called
 * with a non-active leaf. It also removes class from the drop list.
 */
static inline void htb_deactivate(struct htb_sched *q, struct htb_class *cl)
{
	WARN_ON(!cl->prio_activity);

	htb_deactivate_prios(q, cl);
	cl->prio_activity = 0;
	list_del_init(&cl->un.leaf.drop_list);
}
static void htb_enqueue_tail(struct sk_buff *skb, struct Qdisc *sch,
			     struct qdisc_skb_head *qh)
{
	struct sk_buff *last = qh->tail;

	if (last) {
		skb->next = NULL;
		last->next = skb;
		qh->tail = skb;
	} else {
		qh->tail = skb;
		qh->head = skb;
	}
	qh->qlen++;
}
static int htb_enqueue(struct sk_buff *skb, struct Qdisc *sch,
		       struct sk_buff **to_free)
{
	int uninitialized_var(ret);
	struct htb_sched *q = qdisc_priv(sch);
	struct htb_class *cl = htb_classify(skb, sch, &ret);

	if (cl == HTB_DIRECT) {
		/* enqueue to helper queue */
		if (q->direct_queue.qlen < q->direct_qlen) {
			htb_enqueue_tail(skb, sch, &q->direct_queue);
			q->direct_pkts++;
		} else {
			return qdisc_drop(skb, sch, to_free);
		}
#ifdef CONFIG_NET_CLS_ACT
	} else if (!cl) {
		if (ret & __NET_XMIT_BYPASS)
			qdisc_qstats_drop(sch);
		__qdisc_drop(skb, to_free);
		return ret;
#endif
	} else if ((ret = qdisc_enqueue(skb, cl->un.leaf.q,
					to_free)) != NET_XMIT_SUCCESS) {
		if (net_xmit_drop_count(ret)) {
			qdisc_qstats_drop(sch);
			cl->drops++;
		}
		return ret;
	} else {
		htb_activate(q, cl);
	}

	qdisc_qstats_backlog_inc(sch, skb);
	sch->q.qlen++;
	return NET_XMIT_SUCCESS;
}
static inline void htb_accnt_tokens(struct htb_class *cl, int bytes, s64 diff)
{
	s64 toks = diff + cl->tokens;

	if (toks > cl->buffer)
		toks = cl->buffer;
	toks -= (s64) psched_l2t_ns(&cl->rate, bytes);
	if (toks <= -cl->mbuffer)
		toks = 1 - cl->mbuffer;

	cl->tokens = toks;
}

static inline void htb_accnt_ctokens(struct htb_class *cl, int bytes, s64 diff)
{
	s64 toks = diff + cl->ctokens;

	if (toks > cl->cbuffer)
		toks = cl->cbuffer;
	toks -= (s64) psched_l2t_ns(&cl->ceil, bytes);
	if (toks <= -cl->mbuffer)
		toks = 1 - cl->mbuffer;

	cl->ctokens = toks;
}
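
/* Back-of-the-envelope example (illustrative): tokens are nanoseconds
 * of transmission time, so at rate 1 Mbit/s psched_l2t_ns() charges a
 * 1500-byte packet 1500 * 8 / 10^6 s = 12 ms = 12,000,000 ns; a burst
 * (buffer) worth 15 kbytes then holds at most ten such packets of
 * credit before the class must wait or borrow.
 */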
/**
 * htb_charge_class - charges amount "bytes" to leaf and ancestors
 *
 * Routine assumes that packet "bytes" long was dequeued from leaf cl
 * borrowing from "level". It accounts bytes to ceil leaky bucket for
 * leaf and all ancestors and to rate bucket for ancestors at levels
 * "level" and higher. It also handles possible change of mode resulting
 * from the update. Note that mode can also increase here (MAY_BORROW to
 * CAN_SEND) because we can use a more precise clock than the event queue
 * here. In such case we remove class from event queue first.
 */
static void htb_charge_class(struct htb_sched *q, struct htb_class *cl,
			     int level, struct sk_buff *skb)
{
	int bytes = qdisc_pkt_len(skb);
	enum htb_cmode old_mode;
	s64 diff;

	while (cl) {
		diff = min_t(s64, q->now - cl->t_c, cl->mbuffer);
		if (cl->level >= level) {
			if (cl->level == level)
				cl->xstats.lends++;
			htb_accnt_tokens(cl, bytes, diff);
		} else {
			cl->xstats.borrows++;
			cl->tokens += diff;	/* we moved t_c; update tokens */
		}
		htb_accnt_ctokens(cl, bytes, diff);
		cl->t_c = q->now;

		old_mode = cl->cmode;
		diff = 0;
		htb_change_class_mode(q, cl, &diff);
		if (old_mode != cl->cmode) {
			if (old_mode != HTB_CAN_SEND)
				htb_safe_rb_erase(&cl->pq_node, &q->hlevel[cl->level].wait_pq);
			if (cl->cmode != HTB_CAN_SEND)
				htb_add_to_wait_tree(q, cl, diff);
		}

		/* update basic stats except for leaves which are already updated */
		if (cl->level)
			bstats_update(&cl->bstats, skb);

		cl = cl->parent;
	}
}
/**
 * htb_do_events - make mode changes to classes at the level
 *
 * Scans event queue for pending events and applies them. Returns time of
 * next pending event (0 for no event in pq, q->now for too many events).
 * Note: Applied are events which have cl->pq_key <= q->now.
 */
static s64 htb_do_events(struct htb_sched *q, const int level,
			 unsigned long start)
{
	/* don't run for longer than 2 jiffies; 2 is used instead of
	 * 1 to simplify things when jiffy is going to be incremented
	 * too soon
	 */
	unsigned long stop_at = start + 2;
	struct rb_root *wait_pq = &q->hlevel[level].wait_pq;

	while (time_before(jiffies, stop_at)) {
		struct htb_class *cl;
		s64 diff;
		struct rb_node *p = rb_first(wait_pq);

		if (!p)
			return 0;

		cl = rb_entry(p, struct htb_class, pq_node);
		if (cl->pq_key > q->now)
			return cl->pq_key;

		htb_safe_rb_erase(p, wait_pq);
		diff = min_t(s64, q->now - cl->t_c, cl->mbuffer);
		htb_change_class_mode(q, cl, &diff);
		if (cl->cmode != HTB_CAN_SEND)
			htb_add_to_wait_tree(q, cl, diff);
	}

	/* too much load - let's continue after a break for scheduling */
	if (!(q->warned & HTB_WARN_TOOMANYEVENTS)) {
		pr_warn("htb: too many events!\n");
		q->warned |= HTB_WARN_TOOMANYEVENTS;
	}

	return q->now;
}
/* Returns class->node+prio from id-tree where class's id is >= id. NULL
 * if no such one exists.
 */
static struct rb_node *htb_id_find_next_upper(int prio, struct rb_node *n,
					      u32 id)
{
	struct rb_node *r = NULL;
	while (n) {
		struct htb_class *cl =
			rb_entry(n, struct htb_class, node[prio]);

		if (id > cl->common.classid) {
			n = n->rb_right;
		} else if (id < cl->common.classid) {
			r = n;
			n = n->rb_left;
		} else {
			return n;
		}
	}
	return r;
}
/**
 * htb_lookup_leaf - returns next leaf class in DRR order
 *
 * Find leaf where the current feed pointer points to.
 */
static struct htb_class *htb_lookup_leaf(struct htb_prio *hprio, const int prio)
{
	int i;
	struct {
		struct rb_node *root;
		struct rb_node **pptr;
		u32 *pid;
	} stk[TC_HTB_MAXDEPTH], *sp = stk;

	BUG_ON(!hprio->row.rb_node);
	sp->root = hprio->row.rb_node;
	sp->pptr = &hprio->ptr;
	sp->pid = &hprio->last_ptr_id;

	for (i = 0; i < 65535; i++) {
		if (!*sp->pptr && *sp->pid) {
			/* ptr was invalidated but id is valid - try to recover
			 * the original or next ptr
			 */
			*sp->pptr =
				htb_id_find_next_upper(prio, sp->root, *sp->pid);
		}
		*sp->pid = 0;	/* ptr is valid now, so remove this hint as it
				 * can become out of date quickly
				 */
		if (!*sp->pptr) {	/* we are at right end; rewind & go up */
			*sp->pptr = sp->root;
			while ((*sp->pptr)->rb_left)
				*sp->pptr = (*sp->pptr)->rb_left;
			if (sp > stk) {
				sp--;
				if (!*sp->pptr) {
					WARN_ON(1);
					return NULL;
				}
				htb_next_rb_node(sp->pptr);
			}
		} else {
			struct htb_class *cl;
			struct htb_prio *clp;

			cl = rb_entry(*sp->pptr, struct htb_class, node[prio]);
			if (!cl->level)
				return cl;
			clp = &cl->un.inner.clprio[prio];
			(++sp)->root = clp->feed.rb_node;
			sp->pptr = &clp->ptr;
			sp->pid = &clp->last_ptr_id;
		}
	}
	WARN_ON(1);
	return NULL;
}
/* dequeues packet at given priority and level; call only if
 * you are sure that there is active class at prio/level
 */
static struct sk_buff *htb_dequeue_tree(struct htb_sched *q, const int prio,
					const int level)
{
	struct sk_buff *skb = NULL;
	struct htb_class *cl, *start;
	struct htb_level *hlevel = &q->hlevel[level];
	struct htb_prio *hprio = &hlevel->hprio[prio];

	/* look initial class up in the row */
	start = cl = htb_lookup_leaf(hprio, prio);

	do {
next:
		if (unlikely(!cl))
			return NULL;

		/* class can be empty - it is unlikely but can be true if leaf
		 * qdisc drops packets in enqueue routine or if someone used
		 * graft operation on the leaf since last dequeue;
		 * simply deactivate and skip such class
		 */
		if (unlikely(cl->un.leaf.q->q.qlen == 0)) {
			struct htb_class *next;
			htb_deactivate(q, cl);

			/* row/level might become empty */
			if ((q->row_mask[level] & (1 << prio)) == 0)
				return NULL;

			next = htb_lookup_leaf(hprio, prio);

			if (cl == start)	/* fix start if we just deleted it */
				start = next;
			cl = next;
			goto next;
		}

		skb = cl->un.leaf.q->dequeue(cl->un.leaf.q);
		if (likely(skb != NULL))
			break;

		qdisc_warn_nonwc("htb", cl->un.leaf.q);
		htb_next_rb_node(level ? &cl->parent->un.inner.clprio[prio].ptr :
					 &q->hlevel[0].hprio[prio].ptr);
		cl = htb_lookup_leaf(hprio, prio);

	} while (cl != start);

	if (likely(skb != NULL)) {
		bstats_update(&cl->bstats, skb);
		cl->un.leaf.deficit[level] -= qdisc_pkt_len(skb);
		if (cl->un.leaf.deficit[level] < 0) {
			cl->un.leaf.deficit[level] += cl->quantum;
			htb_next_rb_node(level ? &cl->parent->un.inner.clprio[prio].ptr :
						 &q->hlevel[0].hprio[prio].ptr);
		}
		/* this used to be after charge_class but this constellation
		 * gives us slightly better performance
		 */
		if (!cl->un.leaf.q->q.qlen)
			htb_deactivate(q, cl);
		htb_charge_class(q, cl, level, skb);
	}
	return skb;
}
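
/* DRR example (illustrative): take a leaf with quantum 1500 whose
 * deficit[level] is currently 1200. Dequeuing a 1000-byte packet leaves
 * a deficit of 200, so the leaf keeps the turn; the next 1000-byte
 * packet drives the deficit to -800, the quantum tops it back up to 700
 * and the feed pointer advances to the next active leaf of the same
 * priority.
 */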
static struct sk_buff *htb_dequeue(struct Qdisc *sch)
{
	struct sk_buff *skb;
	struct htb_sched *q = qdisc_priv(sch);
	int level;
	s64 next_event;
	unsigned long start_at;

	/* try to dequeue direct packets as high prio (!) to minimize cpu work */
	skb = __qdisc_dequeue_head(&q->direct_queue);
	if (skb != NULL) {
ok:
		qdisc_bstats_update(sch, skb);
		qdisc_qstats_backlog_dec(sch, skb);
		sch->q.qlen--;
		return skb;
	}

	if (!sch->q.qlen)
		goto fin;
	q->now = ktime_get_ns();
	start_at = jiffies;

	next_event = q->now + 5LLU * NSEC_PER_SEC;

	for (level = 0; level < TC_HTB_MAXDEPTH; level++) {
		/* common case optimization - skip event handler quickly */
		int m;
		s64 event = q->near_ev_cache[level];

		if (q->now >= event) {
			event = htb_do_events(q, level, start_at);
			if (!event)
				event = q->now + NSEC_PER_SEC;
			q->near_ev_cache[level] = event;
		}

		if (next_event > event)
			next_event = event;

		m = ~q->row_mask[level];
		while (m != (int)(-1)) {
			int prio = ffz(m);

			m |= 1 << prio;
			skb = htb_dequeue_tree(q, prio, level);
			if (likely(skb != NULL))
				goto ok;
		}
	}
	qdisc_qstats_overlimit(sch);
	if (likely(next_event > q->now))
		qdisc_watchdog_schedule_ns(&q->watchdog, next_event);
	else
		schedule_work(&q->work);
fin:
	return skb;
}
/* reset all classes */
/* always called under BH & queue lock */
static void htb_reset(struct Qdisc *sch)
{
	struct htb_sched *q = qdisc_priv(sch);
	struct htb_class *cl;
	unsigned int i;

	for (i = 0; i < q->clhash.hashsize; i++) {
		hlist_for_each_entry(cl, &q->clhash.hash[i], common.hnode) {
			if (cl->level)
				memset(&cl->un.inner, 0, sizeof(cl->un.inner));
			else {
				if (cl->un.leaf.q)
					qdisc_reset(cl->un.leaf.q);
				INIT_LIST_HEAD(&cl->un.leaf.drop_list);
			}
			cl->prio_activity = 0;
			cl->cmode = HTB_CAN_SEND;
		}
	}
	qdisc_watchdog_cancel(&q->watchdog);
	__qdisc_reset_queue(&q->direct_queue);
	sch->q.qlen = 0;
	sch->qstats.backlog = 0;
	memset(q->hlevel, 0, sizeof(q->hlevel));
	memset(q->row_mask, 0, sizeof(q->row_mask));
	for (i = 0; i < TC_HTB_NUMPRIO; i++)
		INIT_LIST_HEAD(q->drops + i);
}
static const struct nla_policy htb_policy[TCA_HTB_MAX + 1] = {
	[TCA_HTB_PARMS]	= { .len = sizeof(struct tc_htb_opt) },
	[TCA_HTB_INIT]	= { .len = sizeof(struct tc_htb_glob) },
	[TCA_HTB_CTAB]	= { .type = NLA_BINARY, .len = TC_RTAB_SIZE },
	[TCA_HTB_RTAB]	= { .type = NLA_BINARY, .len = TC_RTAB_SIZE },
	[TCA_HTB_DIRECT_QLEN] = { .type = NLA_U32 },
	[TCA_HTB_RATE64] = { .type = NLA_U64 },
	[TCA_HTB_CEIL64] = { .type = NLA_U64 },
};
static void htb_work_func(struct work_struct *work)
{
	struct htb_sched *q = container_of(work, struct htb_sched, work);
	struct Qdisc *sch = q->watchdog.qdisc;

	rcu_read_lock();
	__netif_schedule(qdisc_root(sch));
	rcu_read_unlock();
}
static int htb_init(struct Qdisc *sch, struct nlattr *opt)
{
	struct htb_sched *q = qdisc_priv(sch);
	struct nlattr *tb[TCA_HTB_MAX + 1];
	struct tc_htb_glob *gopt;
	int err;
	int i;

	qdisc_watchdog_init(&q->watchdog, sch);
	INIT_WORK(&q->work, htb_work_func);

	if (!opt)
		return -EINVAL;

	err = nla_parse_nested(tb, TCA_HTB_MAX, opt, htb_policy);
	if (err < 0)
		return err;

	if (!tb[TCA_HTB_INIT])
		return -EINVAL;

	gopt = nla_data(tb[TCA_HTB_INIT]);
	if (gopt->version != HTB_VER >> 16)
		return -EINVAL;

	err = qdisc_class_hash_init(&q->clhash);
	if (err < 0)
		return err;
	for (i = 0; i < TC_HTB_NUMPRIO; i++)
		INIT_LIST_HEAD(q->drops + i);

	qdisc_skb_head_init(&q->direct_queue);

	if (tb[TCA_HTB_DIRECT_QLEN])
		q->direct_qlen = nla_get_u32(tb[TCA_HTB_DIRECT_QLEN]);
	else
		q->direct_qlen = qdisc_dev(sch)->tx_queue_len;

	if ((q->rate2quantum = gopt->rate2quantum) < 1)
		q->rate2quantum = 1;
	q->defcls = gopt->defcls;

	return 0;
}
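
/* Illustrative configuration (not part of the original source):
 *   tc qdisc add dev eth0 root handle 1: htb default 20 r2q 10 direct_qlen 32
 * arrives here as a tc_htb_glob with defcls == 0x20 and
 * rate2quantum == 10, plus a TCA_HTB_DIRECT_QLEN of 32; when direct_qlen
 * is omitted, the device's tx_queue_len is used instead.
 */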
static int htb_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	struct htb_sched *q = qdisc_priv(sch);
	struct nlattr *nest;
	struct tc_htb_glob gopt;

	/* It's safe to not acquire qdisc lock. As we hold RTNL,
	 * no change can happen on the qdisc parameters.
	 */

	gopt.direct_pkts = q->direct_pkts;
	gopt.version = HTB_VER;
	gopt.rate2quantum = q->rate2quantum;
	gopt.defcls = q->defcls;
	gopt.debug = 0;

	nest = nla_nest_start(skb, TCA_OPTIONS);
	if (nest == NULL)
		goto nla_put_failure;
	if (nla_put(skb, TCA_HTB_INIT, sizeof(gopt), &gopt) ||
	    nla_put_u32(skb, TCA_HTB_DIRECT_QLEN, q->direct_qlen))
		goto nla_put_failure;

	return nla_nest_end(skb, nest);

nla_put_failure:
	nla_nest_cancel(skb, nest);
	return -1;
}
static int htb_dump_class(struct Qdisc *sch, unsigned long arg,
			  struct sk_buff *skb, struct tcmsg *tcm)
{
	struct htb_class *cl = (struct htb_class *)arg;
	struct nlattr *nest;
	struct tc_htb_opt opt;

	/* It's safe to not acquire qdisc lock. As we hold RTNL,
	 * no change can happen on the class parameters.
	 */
	tcm->tcm_parent = cl->parent ? cl->parent->common.classid : TC_H_ROOT;
	tcm->tcm_handle = cl->common.classid;
	if (!cl->level && cl->un.leaf.q)
		tcm->tcm_info = cl->un.leaf.q->handle;

	nest = nla_nest_start(skb, TCA_OPTIONS);
	if (nest == NULL)
		goto nla_put_failure;

	memset(&opt, 0, sizeof(opt));

	psched_ratecfg_getrate(&opt.rate, &cl->rate);
	opt.buffer = PSCHED_NS2TICKS(cl->buffer);
	psched_ratecfg_getrate(&opt.ceil, &cl->ceil);
	opt.cbuffer = PSCHED_NS2TICKS(cl->cbuffer);
	opt.quantum = cl->quantum;
	opt.prio = cl->prio;
	opt.level = cl->level;
	if (nla_put(skb, TCA_HTB_PARMS, sizeof(opt), &opt))
		goto nla_put_failure;
	if ((cl->rate.rate_bytes_ps >= (1ULL << 32)) &&
	    nla_put_u64_64bit(skb, TCA_HTB_RATE64, cl->rate.rate_bytes_ps,
			      TCA_HTB_PAD))
		goto nla_put_failure;
	if ((cl->ceil.rate_bytes_ps >= (1ULL << 32)) &&
	    nla_put_u64_64bit(skb, TCA_HTB_CEIL64, cl->ceil.rate_bytes_ps,
			      TCA_HTB_PAD))
		goto nla_put_failure;

	return nla_nest_end(skb, nest);

nla_put_failure:
	nla_nest_cancel(skb, nest);
	return -1;
}
static int
htb_dump_class_stats(struct Qdisc *sch, unsigned long arg, struct gnet_dump *d)
{
	struct htb_class *cl = (struct htb_class *)arg;
	struct gnet_stats_queue qs = {
		.drops = cl->drops,
	};
	__u32 qlen = 0;

	if (!cl->level && cl->un.leaf.q) {
		qlen = cl->un.leaf.q->q.qlen;
		qs.backlog = cl->un.leaf.q->qstats.backlog;
	}
	cl->xstats.tokens = clamp_t(s64, PSCHED_NS2TICKS(cl->tokens),
				    INT_MIN, INT_MAX);
	cl->xstats.ctokens = clamp_t(s64, PSCHED_NS2TICKS(cl->ctokens),
				     INT_MIN, INT_MAX);

	if (gnet_stats_copy_basic(qdisc_root_sleeping_running(sch),
				  d, NULL, &cl->bstats) < 0 ||
	    gnet_stats_copy_rate_est(d, NULL, &cl->rate_est) < 0 ||
	    gnet_stats_copy_queue(d, NULL, &qs, qlen) < 0)
		return -1;

	return gnet_stats_copy_app(d, &cl->xstats, sizeof(cl->xstats));
}
static int htb_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
		     struct Qdisc **old)
{
	struct htb_class *cl = (struct htb_class *)arg;

	if (cl->level)
		return -EINVAL;
	if (new == NULL &&
	    (new = qdisc_create_dflt(sch->dev_queue, &pfifo_qdisc_ops,
				     cl->common.classid)) == NULL)
		return -ENOBUFS;

	*old = qdisc_replace(sch, new, &cl->un.leaf.q);
	return 0;
}

static struct Qdisc *htb_leaf(struct Qdisc *sch, unsigned long arg)
{
	struct htb_class *cl = (struct htb_class *)arg;
	return !cl->level ? cl->un.leaf.q : NULL;
}

static void htb_qlen_notify(struct Qdisc *sch, unsigned long arg)
{
	struct htb_class *cl = (struct htb_class *)arg;

	if (cl->un.leaf.q->q.qlen == 0)
		htb_deactivate(qdisc_priv(sch), cl);
}

static unsigned long htb_get(struct Qdisc *sch, u32 classid)
{
	struct htb_class *cl = htb_find(classid, sch);
	if (cl)
		cl->refcnt++;
	return (unsigned long)cl;
}
static inline int htb_parent_last_child(struct htb_class *cl)
{
	if (!cl->parent)
		/* the root class */
		return 0;
	if (cl->parent->children > 1)
		/* not the last child */
		return 0;
	return 1;
}

static void htb_parent_to_leaf(struct htb_sched *q, struct htb_class *cl,
			       struct Qdisc *new_q)
{
	struct htb_class *parent = cl->parent;

	WARN_ON(cl->level || !cl->un.leaf.q || cl->prio_activity);

	if (parent->cmode != HTB_CAN_SEND)
		htb_safe_rb_erase(&parent->pq_node,
				  &q->hlevel[parent->level].wait_pq);

	parent->level = 0;
	memset(&parent->un.inner, 0, sizeof(parent->un.inner));
	INIT_LIST_HEAD(&parent->un.leaf.drop_list);
	parent->un.leaf.q = new_q ? new_q : &noop_qdisc;
	parent->tokens = parent->buffer;
	parent->ctokens = parent->cbuffer;
	parent->t_c = ktime_get_ns();
	parent->cmode = HTB_CAN_SEND;
}
static void htb_destroy_class(struct Qdisc *sch, struct htb_class *cl)
{
	if (!cl->level) {
		WARN_ON(!cl->un.leaf.q);
		qdisc_destroy(cl->un.leaf.q);
	}
	gen_kill_estimator(&cl->bstats, &cl->rate_est);
	tcf_destroy_chain(&cl->filter_list);
	kfree(cl);
}
static void htb_destroy(struct Qdisc *sch)
{
	struct htb_sched *q = qdisc_priv(sch);
	struct hlist_node *next;
	struct htb_class *cl;
	unsigned int i;

	cancel_work_sync(&q->work);
	qdisc_watchdog_cancel(&q->watchdog);
	/* This line used to be after htb_destroy_class call below
	 * and surprisingly it worked in 2.4. But it must precede it
	 * because filters need their target class alive to be able to call
	 * unbind_filter on it (without an Oops).
	 */
	tcf_destroy_chain(&q->filter_list);

	for (i = 0; i < q->clhash.hashsize; i++) {
		hlist_for_each_entry(cl, &q->clhash.hash[i], common.hnode)
			tcf_destroy_chain(&cl->filter_list);
	}
	for (i = 0; i < q->clhash.hashsize; i++) {
		hlist_for_each_entry_safe(cl, next, &q->clhash.hash[i],
					  common.hnode)
			htb_destroy_class(sch, cl);
	}
	qdisc_class_hash_destroy(&q->clhash);
	__qdisc_reset_queue(&q->direct_queue);
}
static int htb_delete(struct Qdisc *sch, unsigned long arg)
{
	struct htb_sched *q = qdisc_priv(sch);
	struct htb_class *cl = (struct htb_class *)arg;
	struct Qdisc *new_q = NULL;
	int last_child = 0;

	/* TODO: why don't we allow deleting a subtree? references? does
	 * tc subsys guarantee us that in htb_destroy it holds no class
	 * refs so that we can remove children safely there?
	 */
	if (cl->children || cl->filter_cnt)
		return -EBUSY;

	if (!cl->level && htb_parent_last_child(cl)) {
		new_q = qdisc_create_dflt(sch->dev_queue, &pfifo_qdisc_ops,
					  cl->parent->common.classid);
		last_child = 1;
	}

	sch_tree_lock(sch);

	if (!cl->level) {
		unsigned int qlen = cl->un.leaf.q->q.qlen;
		unsigned int backlog = cl->un.leaf.q->qstats.backlog;

		qdisc_reset(cl->un.leaf.q);
		qdisc_tree_reduce_backlog(cl->un.leaf.q, qlen, backlog);
	}

	/* delete from hash and active; remainder in destroy_class */
	qdisc_class_hash_remove(&q->clhash, &cl->common);
	if (cl->parent)
		cl->parent->children--;

	if (cl->prio_activity)
		htb_deactivate(q, cl);

	if (cl->cmode != HTB_CAN_SEND)
		htb_safe_rb_erase(&cl->pq_node,
				  &q->hlevel[cl->level].wait_pq);

	if (last_child)
		htb_parent_to_leaf(q, cl, new_q);

	BUG_ON(--cl->refcnt == 0);
	/*
	 * This shouldn't happen: we "hold" one cops->get() when called
	 * from tc_ctl_tclass; the destroy method is done from cops->put().
	 */

	sch_tree_unlock(sch);
	return 0;
}
static void htb_put(struct Qdisc *sch, unsigned long arg)
{
	struct htb_class *cl = (struct htb_class *)arg;

	if (--cl->refcnt == 0)
		htb_destroy_class(sch, cl);
}
static int htb_change_class(struct Qdisc *sch, u32 classid,
			    u32 parentid, struct nlattr **tca,
			    unsigned long *arg)
{
	int err = -EINVAL;
	struct htb_sched *q = qdisc_priv(sch);
	struct htb_class *cl = (struct htb_class *)*arg, *parent;
	struct nlattr *opt = tca[TCA_OPTIONS];
	struct nlattr *tb[TCA_HTB_MAX + 1];
	struct tc_htb_opt *hopt;
	u64 rate64, ceil64;

	/* extract all subattrs from opt attr */
	if (!opt)
		goto failure;

	err = nla_parse_nested(tb, TCA_HTB_MAX, opt, htb_policy);
	if (err < 0)
		goto failure;

	err = -EINVAL;
	if (tb[TCA_HTB_PARMS] == NULL)
		goto failure;

	parent = parentid == TC_H_ROOT ? NULL : htb_find(parentid, sch);

	hopt = nla_data(tb[TCA_HTB_PARMS]);
	if (!hopt->rate.rate || !hopt->ceil.rate)
		goto failure;

	/* Keep backward compatibility with rate_table based iproute2 tc */
	if (hopt->rate.linklayer == TC_LINKLAYER_UNAWARE)
		qdisc_put_rtab(qdisc_get_rtab(&hopt->rate, tb[TCA_HTB_RTAB]));

	if (hopt->ceil.linklayer == TC_LINKLAYER_UNAWARE)
		qdisc_put_rtab(qdisc_get_rtab(&hopt->ceil, tb[TCA_HTB_CTAB]));

	if (!cl) {		/* new class */
		struct Qdisc *new_q;
		int prio;
		struct {
			struct nlattr		nla;
			struct gnet_estimator	opt;
		} est = {
			.nla = {
				.nla_len	= nla_attr_size(sizeof(est.opt)),
				.nla_type	= TCA_RATE,
			},
			.opt = {
				/* 4s interval, 16s averaging constant */
				.interval	= 2,
				.ewma_log	= 2,
			},
		};

		/* check for valid classid */
		if (!classid || TC_H_MAJ(classid ^ sch->handle) ||
		    htb_find(classid, sch))
			goto failure;

		/* check maximal depth */
		if (parent && parent->parent && parent->parent->level < 2) {
			pr_err("htb: tree is too deep\n");
			goto failure;
		}
		err = -ENOBUFS;
		cl = kzalloc(sizeof(*cl), GFP_KERNEL);
		if (!cl)
			goto failure;

		if (htb_rate_est || tca[TCA_RATE]) {
			err = gen_new_estimator(&cl->bstats, NULL,
						&cl->rate_est,
						NULL,
						qdisc_root_sleeping_running(sch),
						tca[TCA_RATE] ? : &est.nla);
			if (err) {
				kfree(cl);
				goto failure;
			}
		}

		cl->refcnt = 1;
		cl->children = 0;
		INIT_LIST_HEAD(&cl->un.leaf.drop_list);
		RB_CLEAR_NODE(&cl->pq_node);

		for (prio = 0; prio < TC_HTB_NUMPRIO; prio++)
			RB_CLEAR_NODE(&cl->node[prio]);

		/* create leaf qdisc early because it uses kmalloc(GFP_KERNEL)
		 * so it can't be used inside of sch_tree_lock
		 * -- thanks to Karlis Peisenieks
		 */
		new_q = qdisc_create_dflt(sch->dev_queue,
					  &pfifo_qdisc_ops, classid);
		sch_tree_lock(sch);
		if (parent && !parent->level) {
			unsigned int qlen = parent->un.leaf.q->q.qlen;
			unsigned int backlog = parent->un.leaf.q->qstats.backlog;

			/* turn parent into inner node */
			qdisc_reset(parent->un.leaf.q);
			qdisc_tree_reduce_backlog(parent->un.leaf.q, qlen, backlog);
			qdisc_destroy(parent->un.leaf.q);
			if (parent->prio_activity)
				htb_deactivate(q, parent);

			/* remove from evt list because of level change */
			if (parent->cmode != HTB_CAN_SEND) {
				htb_safe_rb_erase(&parent->pq_node, &q->hlevel[0].wait_pq);
				parent->cmode = HTB_CAN_SEND;
			}
			parent->level = (parent->parent ? parent->parent->level
					 : TC_HTB_MAXDEPTH) - 1;
			memset(&parent->un.inner, 0, sizeof(parent->un.inner));
		}
		/* leaf (we) needs elementary qdisc */
		cl->un.leaf.q = new_q ? new_q : &noop_qdisc;

		cl->common.classid = classid;
		cl->parent = parent;

		/* set class to be in HTB_CAN_SEND state */
		cl->tokens = PSCHED_TICKS2NS(hopt->buffer);
		cl->ctokens = PSCHED_TICKS2NS(hopt->cbuffer);
		cl->mbuffer = 60ULL * NSEC_PER_SEC;	/* 1min */
		cl->t_c = ktime_get_ns();
		cl->cmode = HTB_CAN_SEND;

		/* attach to the hash list and parent's family */
		qdisc_class_hash_insert(&q->clhash, &cl->common);
		if (parent)
			parent->children++;
	} else {
		if (tca[TCA_RATE]) {
			err = gen_replace_estimator(&cl->bstats, NULL,
						    &cl->rate_est,
						    NULL,
						    qdisc_root_sleeping_running(sch),
						    tca[TCA_RATE]);
			if (err)
				return err;
		}
		sch_tree_lock(sch);
	}

	rate64 = tb[TCA_HTB_RATE64] ? nla_get_u64(tb[TCA_HTB_RATE64]) : 0;

	ceil64 = tb[TCA_HTB_CEIL64] ? nla_get_u64(tb[TCA_HTB_CEIL64]) : 0;

	psched_ratecfg_precompute(&cl->rate, &hopt->rate, rate64);
	psched_ratecfg_precompute(&cl->ceil, &hopt->ceil, ceil64);

	/* there used to be a nasty bug here, we have to check that node
	 * is really a leaf before changing cl->un.leaf !
	 */
	if (!cl->level) {
		u64 quantum = cl->rate.rate_bytes_ps;

		do_div(quantum, q->rate2quantum);
		cl->quantum = min_t(u64, quantum, INT_MAX);

		if (!hopt->quantum && cl->quantum < 1000) {
			pr_warn("HTB: quantum of class %X is small. Consider r2q change.\n",
				cl->common.classid);
			cl->quantum = 1000;
		}
		if (!hopt->quantum && cl->quantum > 200000) {
			pr_warn("HTB: quantum of class %X is big. Consider r2q change.\n",
				cl->common.classid);
			cl->quantum = 200000;
		}
		if (hopt->quantum)
			cl->quantum = hopt->quantum;
		if ((cl->prio = hopt->prio) >= TC_HTB_NUMPRIO)
			cl->prio = TC_HTB_NUMPRIO - 1;
	}

	cl->buffer = PSCHED_TICKS2NS(hopt->buffer);
	cl->cbuffer = PSCHED_TICKS2NS(hopt->cbuffer);

	sch_tree_unlock(sch);

	qdisc_class_hash_grow(sch, &q->clhash);

	*arg = (unsigned long)cl;
	return 0;

failure:
	return err;
}
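
/* Illustrative example (not part of the original source):
 *   tc class add dev eth0 parent 1: classid 1:10 htb rate 80mbit ceil 100mbit
 * with the default r2q of 10 yields quantum = 10,000,000 bytes/s / 10
 * = 1,000,000, which trips the "quantum ... is big" warning above and
 * is clamped to 200000; an explicit "quantum 60000" (or a larger r2q)
 * avoids the clamp.
 */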
static struct tcf_proto __rcu **htb_find_tcf(struct Qdisc *sch,
					     unsigned long arg)
{
	struct htb_sched *q = qdisc_priv(sch);
	struct htb_class *cl = (struct htb_class *)arg;
	struct tcf_proto __rcu **fl = cl ? &cl->filter_list : &q->filter_list;

	return fl;
}
static unsigned long htb_bind_filter(struct Qdisc *sch, unsigned long parent,
				     u32 classid)
{
	struct htb_class *cl = htb_find(classid, sch);

	/*if (cl && !cl->level) return 0;
	 * The line above used to be there to prevent attaching filters to
	 * leaves. But at least the tc_index filter uses this just to get the
	 * class for other reasons, so we have to allow for it.
	 * ----
	 * 19.6.2002 As Werner explained it is ok - bind filter is just
	 * another way to "lock" the class - unlike "get" this lock can
	 * be broken by class during destroy IIUC.
	 */
	if (cl)
		cl->filter_cnt++;
	return (unsigned long)cl;
}
static void htb_unbind_filter(struct Qdisc *sch, unsigned long arg)
{
	struct htb_class *cl = (struct htb_class *)arg;

	if (cl)
		cl->filter_cnt--;
}

static void htb_walk(struct Qdisc *sch, struct qdisc_walker *arg)
{
	struct htb_sched *q = qdisc_priv(sch);
	struct htb_class *cl;
	unsigned int i;

	if (arg->stop)
		return;

	for (i = 0; i < q->clhash.hashsize; i++) {
		hlist_for_each_entry(cl, &q->clhash.hash[i], common.hnode) {
			if (arg->count < arg->skip) {
				arg->count++;
				continue;
			}
			if (arg->fn(sch, (unsigned long)cl, arg) < 0) {
				arg->stop = 1;
				return;
			}
			arg->count++;
		}
	}
}
static const struct Qdisc_class_ops htb_class_ops = {
	.graft		=	htb_graft,
	.leaf		=	htb_leaf,
	.qlen_notify	=	htb_qlen_notify,
	.get		=	htb_get,
	.put		=	htb_put,
	.change		=	htb_change_class,
	.delete		=	htb_delete,
	.walk		=	htb_walk,
	.tcf_chain	=	htb_find_tcf,
	.bind_tcf	=	htb_bind_filter,
	.unbind_tcf	=	htb_unbind_filter,
	.dump		=	htb_dump_class,
	.dump_stats	=	htb_dump_class_stats,
};

static struct Qdisc_ops htb_qdisc_ops __read_mostly = {
	.cl_ops		=	&htb_class_ops,
	.id		=	"htb",
	.priv_size	=	sizeof(struct htb_sched),
	.enqueue	=	htb_enqueue,
	.dequeue	=	htb_dequeue,
	.peek		=	qdisc_peek_dequeued,
	.init		=	htb_init,
	.reset		=	htb_reset,
	.destroy	=	htb_destroy,
	.dump		=	htb_dump,
	.owner		=	THIS_MODULE,
};
static int __init htb_module_init(void)
{
	return register_qdisc(&htb_qdisc_ops);
}

static void __exit htb_module_exit(void)
{
	unregister_qdisc(&htb_qdisc_ops);
}

module_init(htb_module_init)
module_exit(htb_module_exit)
MODULE_LICENSE("GPL");