/*
 * Copyright (c) 2003 Patrick McHardy, <kaber@trash.net>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * 2003-10-17 - Ported from altq
 */
/*
 * Copyright (c) 1997-1999 Carnegie Mellon University. All Rights Reserved.
 *
 * Permission to use, copy, modify, and distribute this software and
 * its documentation is hereby granted (including for commercial or
 * for-profit use), provided that both the copyright notice and this
 * permission notice appear in all copies of the software, derivative
 * works, or modified versions, and any portions thereof.
 *
 * THIS SOFTWARE IS EXPERIMENTAL AND IS KNOWN TO HAVE BUGS, SOME OF
 * WHICH MAY HAVE SERIOUS CONSEQUENCES.  CARNEGIE MELLON PROVIDES THIS
 * SOFTWARE IN ITS ``AS IS'' CONDITION, AND ANY EXPRESS OR IMPLIED
 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED.  IN NO EVENT SHALL CARNEGIE MELLON UNIVERSITY BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
 * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
 * USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
 * DAMAGE.
 *
 * Carnegie Mellon encourages (but does not require) users of this
 * software to return any improvements or extensions that they make,
 * and to grant Carnegie Mellon the rights to redistribute these
 * changes without encumbrance.
 */
/*
 * H-FSC is described in Proceedings of SIGCOMM'97,
 * "A Hierarchical Fair Service Curve Algorithm for Link-Sharing,
 * Real-Time and Priority Service"
 * by Ion Stoica, Hui Zhang, and T. S. Eugene Ng.
 *
 * Oleg Cherevko <olwi@aq.ml.com.ua> added the upperlimit for link-sharing.
 * when a class has an upperlimit, the fit-time is computed from the
 * upperlimit service curve.  the link-sharing scheduler does not schedule
 * a class whose fit-time exceeds the current time.
 */
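/*
 * Example configuration (a sketch; option names follow the tc(8) hfsc
 * syntax and may vary between iproute2 versions):
 *
 *	tc qdisc add dev eth0 root handle 1: hfsc default 10
 *	tc class add dev eth0 parent 1: classid 1:10 hfsc \
 *		rt m1 2mbit d 10ms m2 1mbit ls m2 500kbit
 *
 * gives class 1:10 a real-time guarantee of 2 Mbit/s for the first
 * 10ms of a backlog period and 1 Mbit/s after that, plus a
 * link-sharing weight of 500 kbit/s for excess bandwidth.
 */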
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/jiffies.h>
#include <linux/compiler.h>
#include <linux/spinlock.h>
#include <linux/skbuff.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/rbtree.h>
#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/rtnetlink.h>
#include <linux/pkt_sched.h>
#include <net/netlink.h>
#include <net/pkt_sched.h>
#include <net/pkt_cls.h>
#include <asm/system.h>
#include <asm/div64.h>
/*
 * kernel internal service curve representation:
 *   coordinates are given by 64 bit unsigned integers.
 *   x-axis: unit is clock count.
 *   y-axis: unit is byte.
 *
 * The service curve parameters are converted to the internal
 * representation. The slope values are scaled to avoid overflow.
 * the inverse slope values as well as the y-projection of the 1st
 * segment are kept in order to avoid 64-bit divide operations
 * that are expensive on 32-bit architectures.
 */

struct internal_sc
{
	u64	sm1;	/* scaled slope of the 1st segment */
	u64	ism1;	/* scaled inverse-slope of the 1st segment */
	u64	dx;	/* the x-projection of the 1st segment */
	u64	dy;	/* the y-projection of the 1st segment */
	u64	sm2;	/* scaled slope of the 2nd segment */
	u64	ism2;	/* scaled inverse-slope of the 2nd segment */
};

/* runtime service curve */
struct runtime_sc
{
	u64	x;	/* current starting position on x-axis */
	u64	y;	/* current starting position on y-axis */
	u64	sm1;	/* scaled slope of the 1st segment */
	u64	ism1;	/* scaled inverse-slope of the 1st segment */
	u64	dx;	/* the x-projection of the 1st segment */
	u64	dy;	/* the y-projection of the 1st segment */
	u64	sm2;	/* scaled slope of the 2nd segment */
	u64	ism2;	/* scaled inverse-slope of the 2nd segment */
};
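/*
 * Example: a curve supplied by userspace as (m1 = 2 Mbit/s, d = 10ms,
 * m2 = 1 Mbit/s) is stored with sm1/ism1 as the scaled (inverse)
 * slope of the 2 Mbit/s segment, dx as 10ms in psched ticks, and
 * dy = seg_x2y(dx, sm1), i.e. the 2500 bytes that the first segment
 * covers, after which the curve continues with slope sm2.
 */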
enum hfsc_class_flags
{
	HFSC_RSC = 0x1,
	HFSC_FSC = 0x2,
	HFSC_USC = 0x4
};

struct hfsc_class
{
	u32		classid;	/* class id */
	unsigned int	refcnt;		/* usage count */

	struct gnet_stats_basic bstats;
	struct gnet_stats_queue qstats;
	struct gnet_stats_rate_est rate_est;
	spinlock_t	*stats_lock;
	unsigned int	level;		/* class level in hierarchy */
	struct tcf_proto *filter_list;	/* filter list */
	unsigned int	filter_cnt;	/* filter count */

	struct hfsc_sched *sched;	/* scheduler data */
	struct hfsc_class *cl_parent;	/* parent class */
	struct list_head siblings;	/* sibling classes */
	struct list_head children;	/* child classes */
	struct Qdisc	*qdisc;		/* leaf qdisc */

	struct rb_node el_node;		/* qdisc's eligible tree member */
	struct rb_root vt_tree;		/* active children sorted by cl_vt */
	struct rb_node vt_node;		/* parent's vt_tree member */
	struct rb_root cf_tree;		/* active children sorted by cl_f */
	struct rb_node cf_node;		/* parent's cf_tree member */
	struct list_head hlist;		/* hash list member */
	struct list_head dlist;		/* drop list member */

	u64	cl_total;		/* total work in bytes */
	u64	cl_cumul;		/* cumulative work in bytes done by
					   real-time criteria */

	u64	cl_d;			/* deadline */
	u64	cl_e;			/* eligible time */
	u64	cl_vt;			/* virtual time */
	u64	cl_f;			/* time when this class will fit for
					   link-sharing, max(myf, cfmin) */
	u64	cl_myf;			/* my fit-time (calculated from this
					   class's own upperlimit curve) */
	u64	cl_myfadj;		/* my fit-time adjustment (to cancel
					   history dependence) */
	u64	cl_cfmin;		/* earliest children's fit-time (used
					   with cl_myf to obtain cl_f) */
	u64	cl_cvtmin;		/* minimal virtual time among the
					   children fit for link-sharing
					   (monotonic within a period) */
	u64	cl_vtadj;		/* intra-period cumulative vt
					   adjustment */
	u64	cl_vtoff;		/* inter-period cumulative vt offset */
	u64	cl_cvtmax;		/* max child's vt in the last period */
	u64	cl_cvtoff;		/* cumulative cvtmax of all periods */
	u64	cl_pcvtoff;		/* parent's cvtoff at initialization
					   time */

	struct internal_sc cl_rsc;	/* internal real-time service curve */
	struct internal_sc cl_fsc;	/* internal fair service curve */
	struct internal_sc cl_usc;	/* internal upperlimit service curve */
	struct runtime_sc cl_deadline;	/* deadline curve */
	struct runtime_sc cl_eligible;	/* eligible curve */
	struct runtime_sc cl_virtual;	/* virtual curve */
	struct runtime_sc cl_ulimit;	/* upperlimit curve */

	unsigned long	cl_flags;	/* which curves are valid */
	unsigned long	cl_vtperiod;	/* vt period sequence number */
	unsigned long	cl_parentperiod;/* parent's vt period sequence number*/
	unsigned long	cl_nactive;	/* number of active children */
};
#define HFSC_HSIZE	16

struct hfsc_sched
{
	u16	defcls;				/* default class id */
	struct hfsc_class root;			/* root class */
	struct list_head clhash[HFSC_HSIZE];	/* class hash */
	struct rb_root eligible;		/* eligible tree */
	struct list_head droplist;		/* active leaf class list (for
						   dropping) */
	struct sk_buff_head requeue;		/* requeued packet */
	struct qdisc_watchdog watchdog;		/* watchdog timer */
};
#define HT_INFINITY	0xffffffffffffffffULL	/* infinite time value */


/*
 * eligible tree holds backlogged classes being sorted by their eligible times.
 * there is one eligible tree per hfsc instance.
 */
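/*
 * Note: classes with equal eligible times go to the right subtree in
 * eltree_insert() below, so rb_first() always yields the class with
 * the smallest cl_e.
 */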
static void
eltree_insert(struct hfsc_class *cl)
{
	struct rb_node **p = &cl->sched->eligible.rb_node;
	struct rb_node *parent = NULL;
	struct hfsc_class *cl1;

	while (*p != NULL) {
		parent = *p;
		cl1 = rb_entry(parent, struct hfsc_class, el_node);
		if (cl->cl_e >= cl1->cl_e)
			p = &parent->rb_right;
		else
			p = &parent->rb_left;
	}
	rb_link_node(&cl->el_node, parent, p);
	rb_insert_color(&cl->el_node, &cl->sched->eligible);
}

static inline void
eltree_remove(struct hfsc_class *cl)
{
	rb_erase(&cl->el_node, &cl->sched->eligible);
}

static inline void
eltree_update(struct hfsc_class *cl)
{
	eltree_remove(cl);
	eltree_insert(cl);
}
/* find the class with the minimum deadline among the eligible classes */
static inline struct hfsc_class *
eltree_get_mindl(struct hfsc_sched *q, u64 cur_time)
{
	struct hfsc_class *p, *cl = NULL;
	struct rb_node *n;

	for (n = rb_first(&q->eligible); n != NULL; n = rb_next(n)) {
		p = rb_entry(n, struct hfsc_class, el_node);
		if (p->cl_e > cur_time)
			break;
		if (cl == NULL || p->cl_d < cl->cl_d)
			cl = p;
	}
	return cl;
}

/* find the class with minimum eligible time among the eligible classes */
static inline struct hfsc_class *
eltree_get_minel(struct hfsc_sched *q)
{
	struct rb_node *n;

	n = rb_first(&q->eligible);
	if (n == NULL)
		return NULL;
	return rb_entry(n, struct hfsc_class, el_node);
}
/*
 * vttree holds backlogged child classes being sorted by their virtual
 * time. each intermediate class has one vttree.
 */
static void
vttree_insert(struct hfsc_class *cl)
{
	struct rb_node **p = &cl->cl_parent->vt_tree.rb_node;
	struct rb_node *parent = NULL;
	struct hfsc_class *cl1;

	while (*p != NULL) {
		parent = *p;
		cl1 = rb_entry(parent, struct hfsc_class, vt_node);
		if (cl->cl_vt >= cl1->cl_vt)
			p = &parent->rb_right;
		else
			p = &parent->rb_left;
	}
	rb_link_node(&cl->vt_node, parent, p);
	rb_insert_color(&cl->vt_node, &cl->cl_parent->vt_tree);
}

static void
vttree_remove(struct hfsc_class *cl)
{
	rb_erase(&cl->vt_node, &cl->cl_parent->vt_tree);
}

static void
vttree_update(struct hfsc_class *cl)
{
	vttree_remove(cl);
	vttree_insert(cl);
}
static inline struct hfsc_class *
vttree_firstfit(struct hfsc_class *cl, u64 cur_time)
{
	struct hfsc_class *p;
	struct rb_node *n;

	for (n = rb_first(&cl->vt_tree); n != NULL; n = rb_next(n)) {
		p = rb_entry(n, struct hfsc_class, vt_node);
		if (p->cl_f <= cur_time)
			return p;
	}
	return NULL;
}
/*
 * get the leaf class with the minimum vt in the hierarchy
 */
static struct hfsc_class *
vttree_get_minvt(struct hfsc_class *cl, u64 cur_time)
{
	/* if root-class's cfmin is bigger than cur_time nothing to do */
	if (cl->cl_cfmin > cur_time)
		return NULL;

	while (cl->level > 0) {
		cl = vttree_firstfit(cl, cur_time);
		if (cl == NULL)
			return NULL;
		/*
		 * update parent's cl_cvtmin.
		 */
		if (cl->cl_parent->cl_cvtmin < cl->cl_vt)
			cl->cl_parent->cl_cvtmin = cl->cl_vt;
	}
	return cl;
}
static void
cftree_insert(struct hfsc_class *cl)
{
	struct rb_node **p = &cl->cl_parent->cf_tree.rb_node;
	struct rb_node *parent = NULL;
	struct hfsc_class *cl1;

	while (*p != NULL) {
		parent = *p;
		cl1 = rb_entry(parent, struct hfsc_class, cf_node);
		if (cl->cl_f >= cl1->cl_f)
			p = &parent->rb_right;
		else
			p = &parent->rb_left;
	}
	rb_link_node(&cl->cf_node, parent, p);
	rb_insert_color(&cl->cf_node, &cl->cl_parent->cf_tree);
}

static void
cftree_remove(struct hfsc_class *cl)
{
	rb_erase(&cl->cf_node, &cl->cl_parent->cf_tree);
}

static void
cftree_update(struct hfsc_class *cl)
{
	cftree_remove(cl);
	cftree_insert(cl);
}
/*
 * service curve support functions
 *
 *  external service curve parameters
 *	m: bps
 *	d: us
 *  internal service curve parameters
 *	sm: (bytes/psched_us) << SM_SHIFT
 *	ism: (psched_us/byte) << ISM_SHIFT
 *	dx: psched_us
 *
 * The clock source resolution with ktime is 1.024us.
 *
 * sm and ism are scaled in order to keep effective digits.
 * SM_SHIFT and ISM_SHIFT are selected to keep at least 4 effective
 * digits in decimal using the following table.
 *
 *  bits/sec      100Kbps     1Mbps     10Mbps     100Mbps    1Gbps
 *  ------------+-------------------------------------------------------
 *  bytes/1.024us 12.8e-3    128e-3    1280e-3    12800e-3   128000e-3
 *
 *  1.024us/byte  78.125    7.8125   0.78125   0.078125   0.0078125
 */
#define SM_SHIFT	20
#define ISM_SHIFT	18

#define SM_MASK		((1ULL << SM_SHIFT) - 1)
#define ISM_MASK	((1ULL << ISM_SHIFT) - 1)
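/*
 * Worked example of the scaling (assuming SM_SHIFT = 20 as defined
 * above): for m = 1 Mbit/s = 125000 bytes/s, one psched tick is
 * 1.024us, so the rate is 0.128 bytes/tick.  Stored unscaled this
 * would round to 0; scaled it becomes sm = 0.128 * 2^20 ~= 134218,
 * keeping about six effective digits.
 */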
static inline u64
seg_x2y(u64 x, u64 sm)
{
	u64 y;

	/*
	 * compute
	 *	y = x * sm >> SM_SHIFT
	 * but divide it for the upper and lower bits to avoid overflow
	 */
	y = (x >> SM_SHIFT) * sm + (((x & SM_MASK) * sm) >> SM_SHIFT);
	return y;
}
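/*
 * Example of why the multiplication is split: with x = 2^40 ticks and
 * sm = 2^27, the naive product x * sm = 2^67 would overflow 64 bits,
 * while the split form computes (x >> 20) * sm = 2^47 plus a zero
 * low-order term, which is the exact value of x * sm >> SM_SHIFT.
 */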
static inline u64
seg_y2x(u64 y, u64 ism)
{
	u64 x;

	if (y == 0)
		x = 0;
	else if (ism == HT_INFINITY)
		x = HT_INFINITY;
	else {
		x = (y >> ISM_SHIFT) * ism
		    + (((y & ISM_MASK) * ism) >> ISM_SHIFT);
	}
	return x;
}
/* Convert m (bps) into sm (bytes/psched us) */
static u64
m2sm(u32 m)
{
	u64 sm;

	sm = ((u64)m << SM_SHIFT);
	sm += PSCHED_TICKS_PER_SEC - 1;
	do_div(sm, PSCHED_TICKS_PER_SEC);
	return sm;
}
/* convert m (bps) into ism (psched us/byte) */
static u64
m2ism(u32 m)
{
	u64 ism;

	if (m == 0)
		ism = HT_INFINITY;
	else {
		ism = ((u64)PSCHED_TICKS_PER_SEC << ISM_SHIFT);
		ism += m - 1;
		do_div(ism, m);
	}
	return ism;
}
/* convert d (us) into dx (psched us) */
static u64
d2dx(u32 d)
{
	u64 dx;

	dx = ((u64)d * PSCHED_TICKS_PER_SEC);
	dx += USEC_PER_SEC - 1;
	do_div(dx, USEC_PER_SEC);
	return dx;
}
/* convert sm (bytes/psched us) into m (bps) */
static u32
sm2m(u64 sm)
{
	u64 m;

	m = (sm * PSCHED_TICKS_PER_SEC) >> SM_SHIFT;
	return (u32)m;
}
/* convert dx (psched us) into d (us) */
static u32
dx2d(u64 dx)
{
	u64 d;

	d = dx * USEC_PER_SEC;
	do_div(d, PSCHED_TICKS_PER_SEC);
	return (u32)d;
}
static void
sc2isc(struct tc_service_curve *sc, struct internal_sc *isc)
{
	isc->sm1  = m2sm(sc->m1);
	isc->ism1 = m2ism(sc->m1);
	isc->dx   = d2dx(sc->d);
	isc->dy   = seg_x2y(isc->dx, isc->sm1);
	isc->sm2  = m2sm(sc->m2);
	isc->ism2 = m2ism(sc->m2);
}
/*
 * initialize the runtime service curve with the given internal
 * service curve starting at (x, y).
 */
static void
rtsc_init(struct runtime_sc *rtsc, struct internal_sc *isc, u64 x, u64 y)
{
	rtsc->x	   = x;
	rtsc->y    = y;
	rtsc->sm1  = isc->sm1;
	rtsc->ism1 = isc->ism1;
	rtsc->dx   = isc->dx;
	rtsc->dy   = isc->dy;
	rtsc->sm2  = isc->sm2;
	rtsc->ism2 = isc->ism2;
}
/*
 * calculate the y-projection of the runtime service curve by the
 * given x-projection value
 */
static u64
rtsc_y2x(struct runtime_sc *rtsc, u64 y)
{
	u64 x;

	if (y < rtsc->y)
		x = rtsc->x;
	else if (y <= rtsc->y + rtsc->dy) {
		/* x belongs to the 1st segment */
		if (rtsc->dy == 0)
			x = rtsc->x + rtsc->dx;
		else
			x = rtsc->x + seg_y2x(y - rtsc->y, rtsc->ism1);
	} else {
		/* x belongs to the 2nd segment */
		x = rtsc->x + rtsc->dx
		    + seg_y2x(y - rtsc->y - rtsc->dy, rtsc->ism2);
	}
	return x;
}
static u64
rtsc_x2y(struct runtime_sc *rtsc, u64 x)
{
	u64 y;

	if (x < rtsc->x)
		y = rtsc->y;
	else if (x <= rtsc->x + rtsc->dx)
		/* y belongs to the 1st segment */
		y = rtsc->y + seg_x2y(x - rtsc->x, rtsc->sm1);
	else
		/* y belongs to the 2nd segment */
		y = rtsc->y + rtsc->dy
		    + seg_x2y(x - rtsc->x - rtsc->dx, rtsc->sm2);
	return y;
}
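/*
 * rtsc_x2y() and rtsc_y2x() are inverses: x2y maps a point in time to
 * the amount of service the curve allows by then, y2x maps an amount
 * of service to the earliest time the curve permits it.  e.g. in
 * update_d() below, rtsc_y2x(&cl->cl_deadline, cl->cl_cumul + next_len)
 * is the deadline by which the next packet has to be sent.
 */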
/*
 * update the runtime service curve by taking the minimum of the current
 * runtime service curve and the service curve starting at (x, y).
 */
static void
rtsc_min(struct runtime_sc *rtsc, struct internal_sc *isc, u64 x, u64 y)
{
	u64 y1, y2, dx, dy;
	u32 dsm;

	if (isc->sm1 <= isc->sm2) {
		/* service curve is convex */
		y1 = rtsc_x2y(rtsc, x);
		if (y1 < y)
			/* the current rtsc is smaller */
			return;
		rtsc->x = x;
		rtsc->y = y;
		return;
	}

	/*
	 * service curve is concave
	 * compute the two y values of the current rtsc
	 *	y1: at x
	 *	y2: at (x + dx)
	 */
	y1 = rtsc_x2y(rtsc, x);
	if (y1 <= y) {
		/* rtsc is below isc, no change to rtsc */
		return;
	}

	y2 = rtsc_x2y(rtsc, x + isc->dx);
	if (y2 >= y + isc->dy) {
		/* rtsc is above isc, replace rtsc by isc */
		rtsc->x = x;
		rtsc->y = y;
		rtsc->dx = isc->dx;
		rtsc->dy = isc->dy;
		return;
	}

	/*
	 * the two curves intersect
	 * compute the offsets (dx, dy) using the reverse
	 * function of seg_x2y()
	 *	seg_x2y(dx, sm1) == seg_x2y(dx, sm2) + (y1 - y)
	 */
	dx = (y1 - y) << SM_SHIFT;
	dsm = isc->sm1 - isc->sm2;
	do_div(dx, dsm);
	/*
	 * check if (x, y1) belongs to the 1st segment of rtsc.
	 * if so, add the offset.
	 */
	if (rtsc->x + rtsc->dx > x)
		dx += rtsc->x + rtsc->dx - x;
	dy = seg_x2y(dx, isc->sm1);

	rtsc->x = x;
	rtsc->y = y;
	rtsc->dx = dx;
	rtsc->dy = dy;
	return;
}
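/*
 * Example: a two-piece curve with m1 > m2 (say 2 Mbit/s for 10ms,
 * then 1 Mbit/s) is concave and front-loads service, while m1 < m2
 * is convex and defers it.  Only in the concave case can the old and
 * new curves cross, which is why the intersection offsets (dx, dy)
 * are computed above instead of simply keeping one of the curves.
 */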
static void
init_ed(struct hfsc_class *cl, unsigned int next_len)
{
	u64 cur_time = psched_get_time();

	/* update the deadline curve */
	rtsc_min(&cl->cl_deadline, &cl->cl_rsc, cur_time, cl->cl_cumul);

	/*
	 * update the eligible curve.
	 * for concave, it is equal to the deadline curve.
	 * for convex, it is a linear curve with slope m2.
	 */
	cl->cl_eligible = cl->cl_deadline;
	if (cl->cl_rsc.sm1 <= cl->cl_rsc.sm2) {
		cl->cl_eligible.dx = 0;
		cl->cl_eligible.dy = 0;
	}

	/* compute e and d */
	cl->cl_e = rtsc_y2x(&cl->cl_eligible, cl->cl_cumul);
	cl->cl_d = rtsc_y2x(&cl->cl_deadline, cl->cl_cumul + next_len);

	eltree_insert(cl);
}
static void
update_ed(struct hfsc_class *cl, unsigned int next_len)
{
	cl->cl_e = rtsc_y2x(&cl->cl_eligible, cl->cl_cumul);
	cl->cl_d = rtsc_y2x(&cl->cl_deadline, cl->cl_cumul + next_len);

	eltree_update(cl);
}
static inline void
update_d(struct hfsc_class *cl, unsigned int next_len)
{
	cl->cl_d = rtsc_y2x(&cl->cl_deadline, cl->cl_cumul + next_len);
}
static inline void
update_cfmin(struct hfsc_class *cl)
{
	struct rb_node *n = rb_first(&cl->cf_tree);
	struct hfsc_class *p;

	if (n == NULL) {
		cl->cl_cfmin = 0;
		return;
	}
	p = rb_entry(n, struct hfsc_class, cf_node);
	cl->cl_cfmin = p->cl_f;
}
static void
init_vf(struct hfsc_class *cl, unsigned int len)
{
	struct hfsc_class *max_cl;
	struct rb_node *n;
	u64 vt, f, cur_time;
	int go_active;

	cur_time = 0;
	go_active = 1;
	for (; cl->cl_parent != NULL; cl = cl->cl_parent) {
		if (go_active && cl->cl_nactive++ == 0)
			go_active = 1;
		else
			go_active = 0;

		if (go_active) {
			n = rb_last(&cl->cl_parent->vt_tree);
			if (n != NULL) {
				max_cl = rb_entry(n, struct hfsc_class, vt_node);
				/*
				 * set vt to the average of the min and max
				 * classes.  if the parent's period didn't
				 * change, don't decrease vt of the class.
				 */
				vt = max_cl->cl_vt;
				if (cl->cl_parent->cl_cvtmin != 0)
					vt = (cl->cl_parent->cl_cvtmin + vt)/2;

				if (cl->cl_parent->cl_vtperiod !=
				    cl->cl_parentperiod || vt > cl->cl_vt)
					cl->cl_vt = vt;
			} else {
				/*
				 * first child for a new parent backlog period.
				 * add parent's cvtmax to cvtoff to make a new
				 * vt (vtoff + vt) larger than the vt in the
				 * last period for all children.
				 */
				vt = cl->cl_parent->cl_cvtmax;
				cl->cl_parent->cl_cvtoff += vt;
				cl->cl_parent->cl_cvtmax = 0;
				cl->cl_parent->cl_cvtmin = 0;
				cl->cl_vt = 0;
			}

			cl->cl_vtoff = cl->cl_parent->cl_cvtoff -
							cl->cl_pcvtoff;

			/* update the virtual curve */
			vt = cl->cl_vt + cl->cl_vtoff;
			rtsc_min(&cl->cl_virtual, &cl->cl_fsc, vt,
						      cl->cl_total);
			if (cl->cl_virtual.x == vt) {
				cl->cl_virtual.x -= cl->cl_vtoff;
				cl->cl_vtoff = 0;
			}
			cl->cl_vtadj = 0;

			cl->cl_vtperiod++;  /* increment vt period */
			cl->cl_parentperiod = cl->cl_parent->cl_vtperiod;
			if (cl->cl_parent->cl_nactive == 0)
				cl->cl_parentperiod++;
			cl->cl_f = 0;

			vttree_insert(cl);
			cftree_insert(cl);

			if (cl->cl_flags & HFSC_USC) {
				/* class has upper limit curve */
				if (cur_time == 0)
					cur_time = psched_get_time();

				/* update the ulimit curve */
				rtsc_min(&cl->cl_ulimit, &cl->cl_usc, cur_time,
					 cl->cl_total);
				/* compute myf */
				cl->cl_myf = rtsc_y2x(&cl->cl_ulimit,
						      cl->cl_total);
				cl->cl_myfadj = 0;
			}
		}

		f = max(cl->cl_myf, cl->cl_cfmin);
		if (f != cl->cl_f) {
			cl->cl_f = f;
			cftree_update(cl);
			update_cfmin(cl->cl_parent);
		}
	}
}
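/*
 * Example of the vt averaging above: if the parent's backlogged
 * children currently have cl_cvtmin = 100 and a maximum vt of 300, a
 * newly backlogged child starts at vt = (100 + 300) / 2 = 200, so it
 * neither jumps ahead of children that have been waiting nor starts
 * so far behind that it would monopolize the link to catch up.
 */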
static void
update_vf(struct hfsc_class *cl, unsigned int len, u64 cur_time)
{
	u64 f; /* , myf_bound, delta; */
	int go_passive = 0;

	if (cl->qdisc->q.qlen == 0 && cl->cl_flags & HFSC_FSC)
		go_passive = 1;

	for (; cl->cl_parent != NULL; cl = cl->cl_parent) {
		cl->cl_total += len;

		if (!(cl->cl_flags & HFSC_FSC) || cl->cl_nactive == 0)
			continue;

		if (go_passive && --cl->cl_nactive == 0)
			go_passive = 1;
		else
			go_passive = 0;

		if (go_passive) {
			/* no more active child, going passive */

			/* update cvtmax of the parent class */
			if (cl->cl_vt > cl->cl_parent->cl_cvtmax)
				cl->cl_parent->cl_cvtmax = cl->cl_vt;

			/* remove this class from the vt tree */
			vttree_remove(cl);

			cftree_remove(cl);
			update_cfmin(cl->cl_parent);

			continue;
		}

		/*
		 * update vt and f
		 */
		cl->cl_vt = rtsc_y2x(&cl->cl_virtual, cl->cl_total)
			    - cl->cl_vtoff + cl->cl_vtadj;

		/*
		 * if vt of the class is smaller than cvtmin,
		 * the class was skipped in the past due to non-fit.
		 * if so, we need to adjust vtadj.
		 */
		if (cl->cl_vt < cl->cl_parent->cl_cvtmin) {
			cl->cl_vtadj += cl->cl_parent->cl_cvtmin - cl->cl_vt;
			cl->cl_vt = cl->cl_parent->cl_cvtmin;
		}

		/* update the vt tree */
		vttree_update(cl);

		if (cl->cl_flags & HFSC_USC) {
			cl->cl_myf = cl->cl_myfadj + rtsc_y2x(&cl->cl_ulimit,
							      cl->cl_total);
#if 0
			/*
			 * This code causes classes to stay way under their
			 * limit when multiple classes are used at gigabit
			 * speed. needs investigation. -kaber
			 */
			/*
			 * if myf lags behind by more than one clock tick
			 * from the current time, adjust myfadj to prevent
			 * a rate-limited class from going greedy.
			 * in a steady state under rate-limiting, myf
			 * fluctuates within one clock tick.
			 */
			myf_bound = cur_time - PSCHED_JIFFIE2US(1);
			if (cl->cl_myf < myf_bound) {
				delta = cur_time - cl->cl_myf;
				cl->cl_myfadj += delta;
				cl->cl_myf += delta;
			}
#endif
		}

		f = max(cl->cl_myf, cl->cl_cfmin);
		if (f != cl->cl_f) {
			cl->cl_f = f;
			cftree_update(cl);
			update_cfmin(cl->cl_parent);
		}
	}
}
static void
set_active(struct hfsc_class *cl, unsigned int len)
{
	if (cl->cl_flags & HFSC_RSC)
		init_ed(cl, len);
	if (cl->cl_flags & HFSC_FSC)
		init_vf(cl, len);

	list_add_tail(&cl->dlist, &cl->sched->droplist);
}

static void
set_passive(struct hfsc_class *cl)
{
	if (cl->cl_flags & HFSC_RSC)
		eltree_remove(cl);

	list_del(&cl->dlist);

	/*
	 * vttree is now handled in update_vf() so that update_vf(cl, 0, 0)
	 * needs to be called explicitly to remove a class from vttree.
	 */
}
/*
 * hack to get length of first packet in queue.
 */
static unsigned int
qdisc_peek_len(struct Qdisc *sch)
{
	struct sk_buff *skb;
	unsigned int len;

	skb = sch->dequeue(sch);
	if (skb == NULL) {
		if (net_ratelimit())
			printk("qdisc_peek_len: non work-conserving qdisc ?\n");
		return 0;
	}
	len = skb->len;
	if (unlikely(sch->ops->requeue(skb, sch) != NET_XMIT_SUCCESS)) {
		if (net_ratelimit())
			printk("qdisc_peek_len: failed to requeue\n");
		qdisc_tree_decrease_qlen(sch, 1);
		return 0;
	}
	return len;
}
static void
hfsc_purge_queue(struct Qdisc *sch, struct hfsc_class *cl)
{
	unsigned int len = cl->qdisc->q.qlen;

	qdisc_reset(cl->qdisc);
	qdisc_tree_decrease_qlen(cl->qdisc, len);
}
static void
hfsc_adjust_levels(struct hfsc_class *cl)
{
	struct hfsc_class *p;
	unsigned int level;

	do {
		level = 0;
		list_for_each_entry(p, &cl->children, siblings) {
			if (p->level >= level)
				level = p->level + 1;
		}
		cl->level = level;
	} while ((cl = cl->cl_parent) != NULL);
}
static inline unsigned int
hfsc_hash(u32 h)
{
	h ^= h >> 8;
	h ^= h >> 4;

	return h & (HFSC_HSIZE - 1);
}
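/*
 * The xor-folding above mixes the handle's upper bits into the low
 * nibble, so e.g. class ids that share a major number and differ only
 * in higher minor bits still spread across the HFSC_HSIZE buckets.
 */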
static inline struct hfsc_class *
hfsc_find_class(u32 classid, struct Qdisc *sch)
{
	struct hfsc_sched *q = qdisc_priv(sch);
	struct hfsc_class *cl;

	list_for_each_entry(cl, &q->clhash[hfsc_hash(classid)], hlist) {
		if (cl->classid == classid)
			return cl;
	}
	return NULL;
}
static void
hfsc_change_rsc(struct hfsc_class *cl, struct tc_service_curve *rsc,
                u64 cur_time)
{
	sc2isc(rsc, &cl->cl_rsc);
	rtsc_init(&cl->cl_deadline, &cl->cl_rsc, cur_time, cl->cl_cumul);
	cl->cl_eligible = cl->cl_deadline;
	if (cl->cl_rsc.sm1 <= cl->cl_rsc.sm2) {
		cl->cl_eligible.dx = 0;
		cl->cl_eligible.dy = 0;
	}
	cl->cl_flags |= HFSC_RSC;
}

static void
hfsc_change_fsc(struct hfsc_class *cl, struct tc_service_curve *fsc)
{
	sc2isc(fsc, &cl->cl_fsc);
	rtsc_init(&cl->cl_virtual, &cl->cl_fsc, cl->cl_vt, cl->cl_total);
	cl->cl_flags |= HFSC_FSC;
}

static void
hfsc_change_usc(struct hfsc_class *cl, struct tc_service_curve *usc,
                u64 cur_time)
{
	sc2isc(usc, &cl->cl_usc);
	rtsc_init(&cl->cl_ulimit, &cl->cl_usc, cur_time, cl->cl_total);
	cl->cl_flags |= HFSC_USC;
}
static int
hfsc_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
                  struct rtattr **tca, unsigned long *arg)
{
	struct hfsc_sched *q = qdisc_priv(sch);
	struct hfsc_class *cl = (struct hfsc_class *)*arg;
	struct hfsc_class *parent = NULL;
	struct rtattr *opt = tca[TCA_OPTIONS-1];
	struct rtattr *tb[TCA_HFSC_MAX];
	struct tc_service_curve *rsc = NULL, *fsc = NULL, *usc = NULL;
	u64 cur_time;

	if (opt == NULL || rtattr_parse_nested(tb, TCA_HFSC_MAX, opt))
		return -EINVAL;

	if (tb[TCA_HFSC_RSC-1]) {
		if (RTA_PAYLOAD(tb[TCA_HFSC_RSC-1]) < sizeof(*rsc))
			return -EINVAL;
		rsc = RTA_DATA(tb[TCA_HFSC_RSC-1]);
		if (rsc->m1 == 0 && rsc->m2 == 0)
			rsc = NULL;
	}

	if (tb[TCA_HFSC_FSC-1]) {
		if (RTA_PAYLOAD(tb[TCA_HFSC_FSC-1]) < sizeof(*fsc))
			return -EINVAL;
		fsc = RTA_DATA(tb[TCA_HFSC_FSC-1]);
		if (fsc->m1 == 0 && fsc->m2 == 0)
			fsc = NULL;
	}

	if (tb[TCA_HFSC_USC-1]) {
		if (RTA_PAYLOAD(tb[TCA_HFSC_USC-1]) < sizeof(*usc))
			return -EINVAL;
		usc = RTA_DATA(tb[TCA_HFSC_USC-1]);
		if (usc->m1 == 0 && usc->m2 == 0)
			usc = NULL;
	}

	if (cl != NULL) {
		if (parentid) {
			if (cl->cl_parent && cl->cl_parent->classid != parentid)
				return -EINVAL;
			if (cl->cl_parent == NULL && parentid != TC_H_ROOT)
				return -EINVAL;
		}
		cur_time = psched_get_time();

		sch_tree_lock(sch);
		if (rsc != NULL)
			hfsc_change_rsc(cl, rsc, cur_time);
		if (fsc != NULL)
			hfsc_change_fsc(cl, fsc);
		if (usc != NULL)
			hfsc_change_usc(cl, usc, cur_time);

		if (cl->qdisc->q.qlen != 0) {
			if (cl->cl_flags & HFSC_RSC)
				update_ed(cl, qdisc_peek_len(cl->qdisc));
			if (cl->cl_flags & HFSC_FSC)
				update_vf(cl, 0, cur_time);
		}
		sch_tree_unlock(sch);

#ifdef CONFIG_NET_ESTIMATOR
		if (tca[TCA_RATE-1])
			gen_replace_estimator(&cl->bstats, &cl->rate_est,
					      cl->stats_lock, tca[TCA_RATE-1]);
#endif
		return 0;
	}

	if (parentid == TC_H_ROOT)
		return -EEXIST;

	parent = &q->root;
	if (parentid) {
		parent = hfsc_find_class(parentid, sch);
		if (parent == NULL)
			return -ENOENT;
	}

	if (classid == 0 || TC_H_MAJ(classid ^ sch->handle) != 0)
		return -EINVAL;
	if (hfsc_find_class(classid, sch))
		return -EEXIST;

	if (rsc == NULL && fsc == NULL)
		return -EINVAL;

	cl = kzalloc(sizeof(struct hfsc_class), GFP_KERNEL);
	if (cl == NULL)
		return -ENOBUFS;

	if (rsc != NULL)
		hfsc_change_rsc(cl, rsc, 0);
	if (fsc != NULL)
		hfsc_change_fsc(cl, fsc);
	if (usc != NULL)
		hfsc_change_usc(cl, usc, 0);

	cl->refcnt    = 1;
	cl->classid   = classid;
	cl->sched     = q;
	cl->cl_parent = parent;
	cl->qdisc = qdisc_create_dflt(sch->dev, &pfifo_qdisc_ops, classid);
	if (cl->qdisc == NULL)
		cl->qdisc = &noop_qdisc;
	cl->stats_lock = &sch->dev->queue_lock;
	INIT_LIST_HEAD(&cl->children);
	cl->vt_tree = RB_ROOT;
	cl->cf_tree = RB_ROOT;

	sch_tree_lock(sch);
	list_add_tail(&cl->hlist, &q->clhash[hfsc_hash(classid)]);
	list_add_tail(&cl->siblings, &parent->children);
	if (parent->level == 0)
		hfsc_purge_queue(sch, parent);
	hfsc_adjust_levels(parent);
	cl->cl_pcvtoff = parent->cl_cvtoff;
	sch_tree_unlock(sch);

#ifdef CONFIG_NET_ESTIMATOR
	if (tca[TCA_RATE-1])
		gen_new_estimator(&cl->bstats, &cl->rate_est,
				  cl->stats_lock, tca[TCA_RATE-1]);
#endif
	*arg = (unsigned long)cl;
	return 0;
}
static void
hfsc_destroy_class(struct Qdisc *sch, struct hfsc_class *cl)
{
	struct hfsc_sched *q = qdisc_priv(sch);

	tcf_destroy_chain(cl->filter_list);
	qdisc_destroy(cl->qdisc);
#ifdef CONFIG_NET_ESTIMATOR
	gen_kill_estimator(&cl->bstats, &cl->rate_est);
#endif
	if (cl != &q->root)
		kfree(cl);
}
static int
hfsc_delete_class(struct Qdisc *sch, unsigned long arg)
{
	struct hfsc_sched *q = qdisc_priv(sch);
	struct hfsc_class *cl = (struct hfsc_class *)arg;

	if (cl->level > 0 || cl->filter_cnt > 0 || cl == &q->root)
		return -EBUSY;

	sch_tree_lock(sch);

	list_del(&cl->siblings);
	hfsc_adjust_levels(cl->cl_parent);

	hfsc_purge_queue(sch, cl);
	list_del(&cl->hlist);

	if (--cl->refcnt == 0)
		hfsc_destroy_class(sch, cl);

	sch_tree_unlock(sch);
	return 0;
}
static struct hfsc_class *
hfsc_classify(struct sk_buff *skb, struct Qdisc *sch, int *qerr)
{
	struct hfsc_sched *q = qdisc_priv(sch);
	struct hfsc_class *cl;
	struct tcf_result res;
	struct tcf_proto *tcf;
	int result;

	if (TC_H_MAJ(skb->priority ^ sch->handle) == 0 &&
	    (cl = hfsc_find_class(skb->priority, sch)) != NULL)
		if (cl->level == 0)
			return cl;

	*qerr = NET_XMIT_BYPASS;
	tcf = q->root.filter_list;
	while (tcf && (result = tc_classify(skb, tcf, &res)) >= 0) {
#ifdef CONFIG_NET_CLS_ACT
		switch (result) {
		case TC_ACT_QUEUED:
		case TC_ACT_STOLEN:
			*qerr = NET_XMIT_SUCCESS;
		case TC_ACT_SHOT:
			return NULL;
		}
#elif defined(CONFIG_NET_CLS_POLICE)
		if (result == TC_POLICE_SHOT)
			return NULL;
#endif
		if ((cl = (struct hfsc_class *)res.class) == NULL) {
			if ((cl = hfsc_find_class(res.classid, sch)) == NULL)
				break; /* filter selected invalid classid */
		}

		if (cl->level == 0)
			return cl; /* hit leaf class */

		/* apply inner filter chain */
		tcf = cl->filter_list;
	}

	/* classification failed, try default class */
	cl = hfsc_find_class(TC_H_MAKE(TC_H_MAJ(sch->handle), q->defcls), sch);
	if (cl == NULL || cl->level > 0)
		return NULL;

	return cl;
}
static int
hfsc_graft_class(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
                 struct Qdisc **old)
{
	struct hfsc_class *cl = (struct hfsc_class *)arg;

	if (cl == NULL)
		return -ENOENT;
	if (cl->level > 0)
		return -EINVAL;
	if (new == NULL) {
		new = qdisc_create_dflt(sch->dev, &pfifo_qdisc_ops,
					cl->classid);
		if (new == NULL)
			new = &noop_qdisc;
	}

	sch_tree_lock(sch);
	hfsc_purge_queue(sch, cl);
	*old = xchg(&cl->qdisc, new);
	sch_tree_unlock(sch);
	return 0;
}
static struct Qdisc *
hfsc_class_leaf(struct Qdisc *sch, unsigned long arg)
{
	struct hfsc_class *cl = (struct hfsc_class *)arg;

	if (cl != NULL && cl->level == 0)
		return cl->qdisc;

	return NULL;
}
static void
hfsc_qlen_notify(struct Qdisc *sch, unsigned long arg)
{
	struct hfsc_class *cl = (struct hfsc_class *)arg;

	if (cl->qdisc->q.qlen == 0) {
		update_vf(cl, 0, 0);
		set_passive(cl);
	}
}
static unsigned long
hfsc_get_class(struct Qdisc *sch, u32 classid)
{
	struct hfsc_class *cl = hfsc_find_class(classid, sch);

	if (cl != NULL)
		cl->refcnt++;

	return (unsigned long)cl;
}

static void
hfsc_put_class(struct Qdisc *sch, unsigned long arg)
{
	struct hfsc_class *cl = (struct hfsc_class *)arg;

	if (--cl->refcnt == 0)
		hfsc_destroy_class(sch, cl);
}
static unsigned long
hfsc_bind_tcf(struct Qdisc *sch, unsigned long parent, u32 classid)
{
	struct hfsc_class *p = (struct hfsc_class *)parent;
	struct hfsc_class *cl = hfsc_find_class(classid, sch);

	if (cl != NULL) {
		if (p != NULL && p->level <= cl->level)
			return 0;
		cl->filter_cnt++;
	}

	return (unsigned long)cl;
}

static void
hfsc_unbind_tcf(struct Qdisc *sch, unsigned long arg)
{
	struct hfsc_class *cl = (struct hfsc_class *)arg;

	cl->filter_cnt--;
}

static struct tcf_proto **
hfsc_tcf_chain(struct Qdisc *sch, unsigned long arg)
{
	struct hfsc_sched *q = qdisc_priv(sch);
	struct hfsc_class *cl = (struct hfsc_class *)arg;

	if (cl == NULL)
		cl = &q->root;

	return &cl->filter_list;
}
static int
hfsc_dump_sc(struct sk_buff *skb, int attr, struct internal_sc *sc)
{
	struct tc_service_curve tsc;

	tsc.m1 = sm2m(sc->sm1);
	tsc.d  = dx2d(sc->dx);
	tsc.m2 = sm2m(sc->sm2);
	RTA_PUT(skb, attr, sizeof(tsc), &tsc);

	return skb->len;

 rtattr_failure:
	return -1;
}
static inline int
hfsc_dump_curves(struct sk_buff *skb, struct hfsc_class *cl)
{
	if ((cl->cl_flags & HFSC_RSC) &&
	    (hfsc_dump_sc(skb, TCA_HFSC_RSC, &cl->cl_rsc) < 0))
		goto rtattr_failure;

	if ((cl->cl_flags & HFSC_FSC) &&
	    (hfsc_dump_sc(skb, TCA_HFSC_FSC, &cl->cl_fsc) < 0))
		goto rtattr_failure;

	if ((cl->cl_flags & HFSC_USC) &&
	    (hfsc_dump_sc(skb, TCA_HFSC_USC, &cl->cl_usc) < 0))
		goto rtattr_failure;

	return skb->len;

 rtattr_failure:
	return -1;
}
static int
hfsc_dump_class(struct Qdisc *sch, unsigned long arg, struct sk_buff *skb,
                struct tcmsg *tcm)
{
	struct hfsc_class *cl = (struct hfsc_class *)arg;
	unsigned char *b = skb_tail_pointer(skb);
	struct rtattr *rta = (struct rtattr *)b;

	tcm->tcm_parent = cl->cl_parent ? cl->cl_parent->classid : TC_H_ROOT;
	tcm->tcm_handle = cl->classid;
	if (cl->level == 0)
		tcm->tcm_info = cl->qdisc->handle;

	RTA_PUT(skb, TCA_OPTIONS, 0, NULL);
	if (hfsc_dump_curves(skb, cl) < 0)
		goto rtattr_failure;
	rta->rta_len = skb_tail_pointer(skb) - b;
	return skb->len;

 rtattr_failure:
	nlmsg_trim(skb, b);
	return -1;
}
static int
hfsc_dump_class_stats(struct Qdisc *sch, unsigned long arg,
	struct gnet_dump *d)
{
	struct hfsc_class *cl = (struct hfsc_class *)arg;
	struct tc_hfsc_stats xstats;

	cl->qstats.qlen = cl->qdisc->q.qlen;
	xstats.level  = cl->level;
	xstats.period = cl->cl_vtperiod;
	xstats.work   = cl->cl_total;
	xstats.rtwork = cl->cl_cumul;

	if (gnet_stats_copy_basic(d, &cl->bstats) < 0 ||
#ifdef CONFIG_NET_ESTIMATOR
	    gnet_stats_copy_rate_est(d, &cl->rate_est) < 0 ||
#endif
	    gnet_stats_copy_queue(d, &cl->qstats) < 0)
		return -1;

	return gnet_stats_copy_app(d, &xstats, sizeof(xstats));
}
static void
hfsc_walk(struct Qdisc *sch, struct qdisc_walker *arg)
{
	struct hfsc_sched *q = qdisc_priv(sch);
	struct hfsc_class *cl;
	unsigned int i;

	if (arg->stop)
		return;

	for (i = 0; i < HFSC_HSIZE; i++) {
		list_for_each_entry(cl, &q->clhash[i], hlist) {
			if (arg->count < arg->skip) {
				arg->count++;
				continue;
			}
			if (arg->fn(sch, (unsigned long)cl, arg) < 0) {
				arg->stop = 1;
				return;
			}
			arg->count++;
		}
	}
}
static void
hfsc_schedule_watchdog(struct Qdisc *sch)
{
	struct hfsc_sched *q = qdisc_priv(sch);
	struct hfsc_class *cl;
	u64 next_time = 0;

	if ((cl = eltree_get_minel(q)) != NULL)
		next_time = cl->cl_e;
	if (q->root.cl_cfmin != 0) {
		if (next_time == 0 || next_time > q->root.cl_cfmin)
			next_time = q->root.cl_cfmin;
	}
	WARN_ON(next_time == 0);
	qdisc_watchdog_schedule(&q->watchdog, next_time);
}
static int
hfsc_init_qdisc(struct Qdisc *sch, struct rtattr *opt)
{
	struct hfsc_sched *q = qdisc_priv(sch);
	struct tc_hfsc_qopt *qopt;
	unsigned int i;

	if (opt == NULL || RTA_PAYLOAD(opt) < sizeof(*qopt))
		return -EINVAL;
	qopt = RTA_DATA(opt);

	sch->stats_lock = &sch->dev->queue_lock;

	q->defcls = qopt->defcls;
	for (i = 0; i < HFSC_HSIZE; i++)
		INIT_LIST_HEAD(&q->clhash[i]);
	q->eligible = RB_ROOT;
	INIT_LIST_HEAD(&q->droplist);
	skb_queue_head_init(&q->requeue);

	q->root.refcnt  = 1;
	q->root.classid = sch->handle;
	q->root.sched   = q;
	q->root.qdisc = qdisc_create_dflt(sch->dev, &pfifo_qdisc_ops,
					  sch->handle);
	if (q->root.qdisc == NULL)
		q->root.qdisc = &noop_qdisc;
	q->root.stats_lock = &sch->dev->queue_lock;
	INIT_LIST_HEAD(&q->root.children);
	q->root.vt_tree = RB_ROOT;
	q->root.cf_tree = RB_ROOT;

	list_add(&q->root.hlist, &q->clhash[hfsc_hash(q->root.classid)]);

	qdisc_watchdog_init(&q->watchdog, sch);

	return 0;
}
static int
hfsc_change_qdisc(struct Qdisc *sch, struct rtattr *opt)
{
	struct hfsc_sched *q = qdisc_priv(sch);
	struct tc_hfsc_qopt *qopt;

	if (opt == NULL || RTA_PAYLOAD(opt) < sizeof(*qopt))
		return -EINVAL;
	qopt = RTA_DATA(opt);

	sch_tree_lock(sch);
	q->defcls = qopt->defcls;
	sch_tree_unlock(sch);

	return 0;
}
static void
hfsc_reset_class(struct hfsc_class *cl)
{
	cl->cl_total        = 0;
	cl->cl_cumul        = 0;
	cl->cl_d            = 0;
	cl->cl_e            = 0;
	cl->cl_vt           = 0;
	cl->cl_vtadj        = 0;
	cl->cl_vtoff        = 0;
	cl->cl_cvtmin       = 0;
	cl->cl_cvtmax       = 0;
	cl->cl_cvtoff       = 0;
	cl->cl_pcvtoff      = 0;
	cl->cl_vtperiod     = 0;
	cl->cl_parentperiod = 0;
	cl->cl_f            = 0;
	cl->cl_myf          = 0;
	cl->cl_myfadj       = 0;
	cl->cl_cfmin        = 0;
	cl->cl_nactive      = 0;

	cl->vt_tree = RB_ROOT;
	cl->cf_tree = RB_ROOT;
	qdisc_reset(cl->qdisc);

	if (cl->cl_flags & HFSC_RSC)
		rtsc_init(&cl->cl_deadline, &cl->cl_rsc, 0, 0);
	if (cl->cl_flags & HFSC_FSC)
		rtsc_init(&cl->cl_virtual, &cl->cl_fsc, 0, 0);
	if (cl->cl_flags & HFSC_USC)
		rtsc_init(&cl->cl_ulimit, &cl->cl_usc, 0, 0);
}
static void
hfsc_reset_qdisc(struct Qdisc *sch)
{
	struct hfsc_sched *q = qdisc_priv(sch);
	struct hfsc_class *cl;
	unsigned int i;

	for (i = 0; i < HFSC_HSIZE; i++) {
		list_for_each_entry(cl, &q->clhash[i], hlist)
			hfsc_reset_class(cl);
	}
	__skb_queue_purge(&q->requeue);
	q->eligible = RB_ROOT;
	INIT_LIST_HEAD(&q->droplist);
	qdisc_watchdog_cancel(&q->watchdog);
	sch->q.qlen = 0;
}
static void
hfsc_destroy_qdisc(struct Qdisc *sch)
{
	struct hfsc_sched *q = qdisc_priv(sch);
	struct hfsc_class *cl, *next;
	unsigned int i;

	for (i = 0; i < HFSC_HSIZE; i++) {
		list_for_each_entry_safe(cl, next, &q->clhash[i], hlist)
			hfsc_destroy_class(sch, cl);
	}
	__skb_queue_purge(&q->requeue);
	qdisc_watchdog_cancel(&q->watchdog);
}
static int
hfsc_dump_qdisc(struct Qdisc *sch, struct sk_buff *skb)
{
	struct hfsc_sched *q = qdisc_priv(sch);
	unsigned char *b = skb_tail_pointer(skb);
	struct tc_hfsc_qopt qopt;

	qopt.defcls = q->defcls;
	RTA_PUT(skb, TCA_OPTIONS, sizeof(qopt), &qopt);
	return skb->len;

 rtattr_failure:
	nlmsg_trim(skb, b);
	return -1;
}
static int
hfsc_enqueue(struct sk_buff *skb, struct Qdisc *sch)
{
	struct hfsc_class *cl;
	unsigned int len;
	int err;

	cl = hfsc_classify(skb, sch, &err);
	if (cl == NULL) {
		if (err == NET_XMIT_BYPASS)
			sch->qstats.drops++;
		kfree_skb(skb);
		return err;
	}

	len = skb->len;
	err = cl->qdisc->enqueue(skb, cl->qdisc);
	if (unlikely(err != NET_XMIT_SUCCESS)) {
		cl->qstats.drops++;
		sch->qstats.drops++;
		return err;
	}

	if (cl->qdisc->q.qlen == 1)
		set_active(cl, len);

	cl->bstats.packets++;
	cl->bstats.bytes += len;
	sch->bstats.packets++;
	sch->bstats.bytes += len;
	sch->q.qlen++;

	return NET_XMIT_SUCCESS;
}
static struct sk_buff *
hfsc_dequeue(struct Qdisc *sch)
{
	struct hfsc_sched *q = qdisc_priv(sch);
	struct hfsc_class *cl;
	struct sk_buff *skb;
	u64 cur_time;
	unsigned int next_len;
	int realtime = 0;

	if (sch->q.qlen == 0)
		return NULL;
	if ((skb = __skb_dequeue(&q->requeue)))
		goto out;

	cur_time = psched_get_time();

	/*
	 * if there are eligible classes, use real-time criteria.
	 * find the class with the minimum deadline among
	 * the eligible classes.
	 */
	if ((cl = eltree_get_mindl(q, cur_time)) != NULL) {
		realtime = 1;
	} else {
		/*
		 * use link-sharing criteria
		 * get the class with the minimum vt in the hierarchy
		 */
		cl = vttree_get_minvt(&q->root, cur_time);
		if (cl == NULL) {
			sch->qstats.overlimits++;
			hfsc_schedule_watchdog(sch);
			return NULL;
		}
	}

	skb = cl->qdisc->dequeue(cl->qdisc);
	if (skb == NULL) {
		if (net_ratelimit())
			printk("HFSC: Non-work-conserving qdisc ?\n");
		return NULL;
	}

	update_vf(cl, skb->len, cur_time);
	if (realtime)
		cl->cl_cumul += skb->len;

	if (cl->qdisc->q.qlen != 0) {
		if (cl->cl_flags & HFSC_RSC) {
			/* update ed */
			next_len = qdisc_peek_len(cl->qdisc);
			if (realtime)
				update_ed(cl, next_len);
			else
				update_d(cl, next_len);
		}
	} else {
		/* the class becomes passive */
		set_passive(cl);
	}

 out:
	sch->flags &= ~TCQ_F_THROTTLED;
	sch->q.qlen--;

	return skb;
}
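/*
 * Note on the two dequeue criteria above: a class with only a
 * link-sharing curve never enters the eligible tree and is served
 * purely by vt order; a class with only a real-time curve is served
 * by deadline whenever it is eligible.  A class with both gets its
 * guarantee via eltree_get_mindl() and competes for excess bandwidth
 * via vttree_get_minvt().
 */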
static int
hfsc_requeue(struct sk_buff *skb, struct Qdisc *sch)
{
	struct hfsc_sched *q = qdisc_priv(sch);

	__skb_queue_head(&q->requeue, skb);
	sch->q.qlen++;
	sch->qstats.requeues++;
	return NET_XMIT_SUCCESS;
}
static unsigned int
hfsc_drop(struct Qdisc *sch)
{
	struct hfsc_sched *q = qdisc_priv(sch);
	struct hfsc_class *cl;
	unsigned int len;

	list_for_each_entry(cl, &q->droplist, dlist) {
		if (cl->qdisc->ops->drop != NULL &&
		    (len = cl->qdisc->ops->drop(cl->qdisc)) > 0) {
			if (cl->qdisc->q.qlen == 0) {
				update_vf(cl, 0, 0);
				set_passive(cl);
			} else {
				list_move_tail(&cl->dlist, &q->droplist);
			}
			cl->qstats.drops++;
			sch->qstats.drops++;
			sch->q.qlen--;
			return len;
		}
	}
	return 0;
}
static struct Qdisc_class_ops hfsc_class_ops = {
	.change		= hfsc_change_class,
	.delete		= hfsc_delete_class,
	.graft		= hfsc_graft_class,
	.leaf		= hfsc_class_leaf,
	.qlen_notify	= hfsc_qlen_notify,
	.get		= hfsc_get_class,
	.put		= hfsc_put_class,
	.bind_tcf	= hfsc_bind_tcf,
	.unbind_tcf	= hfsc_unbind_tcf,
	.tcf_chain	= hfsc_tcf_chain,
	.dump		= hfsc_dump_class,
	.dump_stats	= hfsc_dump_class_stats,
	.walk		= hfsc_walk
};

static struct Qdisc_ops hfsc_qdisc_ops = {
	.id		= "hfsc",
	.init		= hfsc_init_qdisc,
	.change		= hfsc_change_qdisc,
	.reset		= hfsc_reset_qdisc,
	.destroy	= hfsc_destroy_qdisc,
	.dump		= hfsc_dump_qdisc,
	.enqueue	= hfsc_enqueue,
	.dequeue	= hfsc_dequeue,
	.requeue	= hfsc_requeue,
	.drop		= hfsc_drop,
	.cl_ops		= &hfsc_class_ops,
	.priv_size	= sizeof(struct hfsc_sched),
	.owner		= THIS_MODULE
};

static int __init
hfsc_init(void)
{
	return register_qdisc(&hfsc_qdisc_ops);
}

static void __exit
hfsc_cleanup(void)
{
	unregister_qdisc(&hfsc_qdisc_ops);
}

MODULE_LICENSE("GPL");
module_init(hfsc_init);
module_exit(hfsc_cleanup);