/*
 * net/sched/sch_qfq.c         Quick Fair Queueing Plus Scheduler.
 *
 * Copyright (c) 2009 Fabio Checconi, Luigi Rizzo, and Paolo Valente.
 * Copyright (c) 2012 Paolo Valente.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * version 2 as published by the Free Software Foundation.
 */
#include <linux/module.h>
#include <linux/init.h>
#include <linux/bitops.h>
#include <linux/errno.h>
#include <linux/netdevice.h>
#include <linux/pkt_sched.h>
#include <net/sch_generic.h>
#include <net/pkt_sched.h>
#include <net/pkt_cls.h>
/*  Quick Fair Queueing Plus
    ========================

    Sources:

    [1] Paolo Valente,
    "Reducing the Execution Time of Fair-Queueing Schedulers."
    http://algo.ing.unimo.it/people/paolo/agg-sched/agg-sched.pdf

    Sources for QFQ:

    [2] Fabio Checconi, Luigi Rizzo, and Paolo Valente: "QFQ: Efficient
    Packet Scheduling with Tight Bandwidth Distribution Guarantees."

    See also:
    http://retis.sssup.it/~fabio/linux/qfq/

    QFQ+ divides classes into aggregates of at most MAX_AGG_CLASSES
    classes. Each aggregate is timestamped with a virtual start time S
    and a virtual finish time F, and scheduled according to its
    timestamps. S and F are computed as a function of a system virtual
    time function V. The classes within each aggregate are instead
    scheduled with DRR.

    To speed up operations, QFQ+ also divides aggregates into a limited
    number of groups. Which group a class belongs to depends on the
    ratio between the maximum packet length for the class and the weight
    of the class. Groups have their own S and F. In the end, QFQ+
    schedules groups, then aggregates within groups, then classes within
    aggregates. See [1] and [2] for a full description.
    Virtual time computations.

    S, F and V are all computed in fixed point arithmetic with
    FRAC_BITS decimal bits.

    QFQ_MAX_INDEX is the maximum index allowed for a group. We need
    one bit per index.
    QFQ_MAX_WSHIFT is the maximum power of two supported as a weight.

    The layout of the bits is as below:

                   [ MTU_SHIFT ][      FRAC_BITS    ]
                   [ MAX_INDEX    ][ MIN_SLOT_SHIFT ]

    where MIN_SLOT_SHIFT is derived by difference from the others.

    The max group index corresponds to Lmax/w_min, where
    Lmax = 1<<MTU_SHIFT and w_min = 1.
    From this, and knowing how many groups (MAX_INDEX) we want,
    we can derive the shift corresponding to each group.
    Because we often need to compute
	F = S + len/w_i  and  V = V + len/wsum
    instead of storing w_i we store the value
	inv_w = (1<<FRAC_BITS)/w_i
    so we can compute F = S + len * inv_w with a multiplication instead
    of a division (and likewise V = V + len * inv_wsum, with
    inv_wsum = (1<<FRAC_BITS)/wsum).
    We use W_TOT in the formulas so we can easily move between
    static and adaptive weight sum.
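
    Worked example (numbers purely illustrative, not from the original
    sources): with FRAC_BITS = 30, a class with w_i = 4 stores
    inv_w = (1<<30)/4 = 1<<28; serving a len = 1000 byte packet then
    advances its finish time by len * inv_w = 1000<<28, which equals
    len/w_i = 250 in the same fixed-point scale as S, F and V.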

    The per-scheduler-instance data contain all the data structures
    for the scheduler: bitmaps and bucket lists.

 */
/*
 * Maximum number of consecutive slots occupied by backlogged classes
 * inside a group.
 */
#define QFQ_MAX_SLOTS	32
/*
 * Shifts used for aggregate<->group mapping. We allow class weights that are
 * in the range [1, 2^MAX_WSHIFT], and we try to map each aggregate i to the
 * group with the smallest index that can support the L_i / r_i configured
 * for the classes in the aggregate.
 *
 * grp->index is the index of the group; and grp->slot_shift
 * is the shift for the corresponding (scaled) sigma_i.
 */
#define QFQ_MAX_INDEX		24
#define QFQ_MAX_WSHIFT		10

#define QFQ_MAX_WEIGHT		(1<<QFQ_MAX_WSHIFT) /* see qfq_slot_insert */
#define QFQ_MAX_WSUM		(64*QFQ_MAX_WEIGHT)

#define FRAC_BITS		30	/* fixed point arithmetic */
#define ONE_FP			(1UL << FRAC_BITS)
#define IWSUM			(ONE_FP/QFQ_MAX_WSUM)

#define QFQ_MTU_SHIFT		16	/* to support TSO/GSO */
#define QFQ_MIN_LMAX		512	/* see qfq_slot_insert */

#define QFQ_MAX_AGG_CLASSES	8 /* max num classes per aggregate allowed */
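
/*
 * Illustrative consequence of the constants above (not stated in the
 * original comments): ONE_FP = 1<<30 and QFQ_MAX_WSUM = 64*1024 = 1<<16,
 * so IWSUM = 1<<14. Every time len bytes are served, V advances by
 * len * (1<<14), i.e. by len/QFQ_MAX_WSUM in fixed point.
 */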
/*
 * Possible group states. These values are used as indexes for the bitmaps
 * array of struct qfq_sched.
 */
enum qfq_state { ER, IR, EB, IB, QFQ_MAX_STATE };
struct qfq_aggregate;

struct qfq_class {
	struct Qdisc_class_common common;

	unsigned int refcnt;
	unsigned int filter_cnt;

	struct gnet_stats_basic_packed bstats;
	struct gnet_stats_queue qstats;
	struct gnet_stats_rate_est rate_est;
	struct Qdisc *qdisc;
	struct list_head alist;		/* Link for active-classes list. */
	struct qfq_aggregate *agg;	/* Parent aggregate. */
	int deficit;			/* DRR deficit counter. */
};
struct qfq_aggregate {
	struct hlist_node next;	/* Link for the slot list. */
	u64 S, F;		/* flow timestamps (exact) */

	/* group we belong to. In principle we would need the index,
	 * which is log_2(lmax/weight), but we never reference it
	 * directly, only the group.
	 */
	struct qfq_group *grp;

	/* these are copied from the flowset. */
	u32 class_weight;	/* Weight of each class in this aggregate. */
	/* Max pkt size for the classes in this aggregate, DRR quantum. */
	int lmax;

	u32 inv_w;		/* ONE_FP/(sum of weights of classes in aggr.). */
	u32 budgetmax;		/* Max budget for this aggregate. */
	u32 initial_budget, budget; /* Initial and current budget. */

	int num_classes;	/* Number of classes in this aggr. */
	struct list_head active; /* DRR queue of active classes. */

	struct hlist_node nonfull_next;	/* See nonfull_aggs in qfq_sched. */
};
struct qfq_group {
	u64 S, F;			/* group timestamps (approx). */
	unsigned int slot_shift;	/* Slot shift. */
	unsigned int index;		/* Group index. */
	unsigned int front;		/* Index of the front slot. */
	unsigned long full_slots;	/* non-empty slots */

	/* Array of RR lists of active aggregates. */
	struct hlist_head slots[QFQ_MAX_SLOTS];
};
struct qfq_sched {
	struct tcf_proto *filter_list;
	struct Qdisc_class_hash clhash;

	u64			oldV, V;	/* Precise virtual times. */
	struct qfq_aggregate	*in_serv_agg;	/* Aggregate being served. */
	u32			num_active_agg; /* Num. of active aggregates */
	u32			wsum;		/* weight sum */

	unsigned long bitmaps[QFQ_MAX_STATE];	    /* Group bitmaps. */
	struct qfq_group groups[QFQ_MAX_INDEX + 1]; /* The groups. */
	u32 min_slot_shift;	/* Index of the group-0 bit in the bitmaps. */

	u32 max_agg_classes;		/* Max number of classes per aggr. */
	struct hlist_head nonfull_aggs; /* Aggs with room for more classes. */
};
/*
 * Possible reasons why the timestamps of an aggregate are updated.
 * enqueue: the aggregate switches from idle to active and must be
 *	    scheduled for service
 * requeue: the aggregate finishes its budget, so it stops being served and
 *	    must be rescheduled for service
 */
enum update_reason {enqueue, requeue};
static struct qfq_class *qfq_find_class(struct Qdisc *sch, u32 classid)
{
	struct qfq_sched *q = qdisc_priv(sch);
	struct Qdisc_class_common *clc;

	clc = qdisc_class_find(&q->clhash, classid);
	if (clc == NULL)
		return NULL;
	return container_of(clc, struct qfq_class, common);
}
static void qfq_purge_queue(struct qfq_class *cl)
{
	unsigned int len = cl->qdisc->q.qlen;

	qdisc_reset(cl->qdisc);
	qdisc_tree_decrease_qlen(cl->qdisc, len);
}
static const struct nla_policy qfq_policy[TCA_QFQ_MAX + 1] = {
	[TCA_QFQ_WEIGHT] = { .type = NLA_U32 },
	[TCA_QFQ_LMAX] = { .type = NLA_U32 },
};
/*
 * Calculate a flow index, given its weight and maximum packet length.
 * index = log_2(maxlen/weight) but we need to apply the scaling.
 * This is used only once at flow creation.
 */
static int qfq_calc_index(u32 inv_w, unsigned int maxlen, u32 min_slot_shift)
{
	u64 slot_size = (u64)maxlen * inv_w;
	unsigned long size_map;
	int index = 0;

	size_map = slot_size >> min_slot_shift;
	if (!size_map)
		goto out;

	index = __fls(size_map) + 1;	/* basically a log_2 */
	index -= !(slot_size - (1ULL << (index + min_slot_shift - 1)));

	if (index < 0)
		index = 0;
out:
	pr_debug("qfq calc_index: W = %lu, L = %u, I = %d\n",
		 (unsigned long) ONE_FP/inv_w, maxlen, index);

	return index;
}
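
/*
 * Illustrative example (values are assumptions, not taken from the
 * code above): with min_slot_shift = 28, a flow with maxlen = 1024 and
 * weight 1 (inv_w = 1<<30) gives slot_size = 1<<40 and size_map = 1<<12;
 * __fls() + 1 yields 13, and the exact-power-of-two correction subtracts
 * one, so the flow lands in group index 12.
 */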
static void qfq_deactivate_agg(struct qfq_sched *, struct qfq_aggregate *);
static void qfq_activate_agg(struct qfq_sched *, struct qfq_aggregate *,
			     enum update_reason);
static void qfq_init_agg(struct qfq_sched *q, struct qfq_aggregate *agg,
			 u32 lmax, u32 weight)
{
	INIT_LIST_HEAD(&agg->active);
	hlist_add_head(&agg->nonfull_next, &q->nonfull_aggs);

	agg->lmax = lmax;
	agg->class_weight = weight;
}
static struct qfq_aggregate *qfq_find_agg(struct qfq_sched *q,
					  u32 lmax, u32 weight)
{
	struct qfq_aggregate *agg;
	struct hlist_node *n;

	hlist_for_each_entry(agg, n, &q->nonfull_aggs, nonfull_next)
		if (agg->lmax == lmax && agg->class_weight == weight)
			return agg;

	return NULL;
}
/* Update aggregate as a function of the new number of classes. */
static void qfq_update_agg(struct qfq_sched *q, struct qfq_aggregate *agg,
			   int new_num_classes)
{
	u32 new_agg_weight;

	if (new_num_classes == q->max_agg_classes)
		hlist_del_init(&agg->nonfull_next);

	if (agg->num_classes > new_num_classes &&
	    new_num_classes == q->max_agg_classes - 1) /* agg no more full */
		hlist_add_head(&agg->nonfull_next, &q->nonfull_aggs);

	agg->budgetmax = new_num_classes * agg->lmax;
	new_agg_weight = agg->class_weight * new_num_classes;
	agg->inv_w = ONE_FP/new_agg_weight;

	if (agg->grp == NULL) {
		int i = qfq_calc_index(agg->inv_w, agg->budgetmax,
				       q->min_slot_shift);
		agg->grp = &q->groups[i];
	}

	q->wsum +=
		(int) agg->class_weight * (new_num_classes - agg->num_classes);

	agg->num_classes = new_num_classes;
}
/* Add class to aggregate. */
static void qfq_add_to_agg(struct qfq_sched *q,
			   struct qfq_aggregate *agg,
			   struct qfq_class *cl)
{
	cl->agg = agg;

	qfq_update_agg(q, agg, agg->num_classes+1);
	if (cl->qdisc->q.qlen > 0) { /* adding an active class */
		list_add_tail(&cl->alist, &agg->active);
		if (list_first_entry(&agg->active, struct qfq_class, alist) ==
		    cl && q->in_serv_agg != agg) /* agg was inactive */
			qfq_activate_agg(q, agg, enqueue); /* schedule agg */
	}
}
static struct qfq_aggregate *qfq_choose_next_agg(struct qfq_sched *);
static void qfq_destroy_agg(struct qfq_sched *q, struct qfq_aggregate *agg)
{
	if (!hlist_unhashed(&agg->nonfull_next))
		hlist_del_init(&agg->nonfull_next);
	if (q->in_serv_agg == agg)
		q->in_serv_agg = qfq_choose_next_agg(q);
	kfree(agg);
}
/* Deschedule class from within its parent aggregate. */
static void qfq_deactivate_class(struct qfq_sched *q, struct qfq_class *cl)
{
	struct qfq_aggregate *agg = cl->agg;

	list_del(&cl->alist); /* remove from RR queue of the aggregate */
	if (list_empty(&agg->active)) /* agg is now inactive */
		qfq_deactivate_agg(q, agg);
}
/* Remove class from its parent aggregate. */
static void qfq_rm_from_agg(struct qfq_sched *q, struct qfq_class *cl)
{
	struct qfq_aggregate *agg = cl->agg;

	cl->agg = NULL;
	if (agg->num_classes == 1) { /* agg being emptied, destroy it */
		qfq_destroy_agg(q, agg);
		return;
	}
	qfq_update_agg(q, agg, agg->num_classes-1);
}
/* Deschedule class and remove it from its parent aggregate. */
static void qfq_deact_rm_from_agg(struct qfq_sched *q, struct qfq_class *cl)
{
	if (cl->qdisc->q.qlen > 0) /* class is active */
		qfq_deactivate_class(q, cl);

	qfq_rm_from_agg(q, cl);
}
/* Move class to a new aggregate, matching the new class weight and/or lmax */
static int qfq_change_agg(struct Qdisc *sch, struct qfq_class *cl, u32 weight,
			  u32 lmax)
{
	struct qfq_sched *q = qdisc_priv(sch);
	struct qfq_aggregate *new_agg = qfq_find_agg(q, lmax, weight);

	if (new_agg == NULL) { /* create new aggregate */
		new_agg = kzalloc(sizeof(*new_agg), GFP_ATOMIC);
		if (new_agg == NULL)
			return -ENOBUFS;
		qfq_init_agg(q, new_agg, lmax, weight);
	}

	qfq_deact_rm_from_agg(q, cl);
	qfq_add_to_agg(q, new_agg, cl);

	return 0;
}
static int qfq_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
			    struct nlattr **tca, unsigned long *arg)
{
	struct qfq_sched *q = qdisc_priv(sch);
	struct qfq_class *cl = (struct qfq_class *)*arg;
	bool existing = false;
	struct nlattr *tb[TCA_QFQ_MAX + 1];
	struct qfq_aggregate *new_agg = NULL;
	u32 weight, lmax, inv_w;
	int err;
	int delta_w;

	if (tca[TCA_OPTIONS] == NULL) {
		pr_notice("qfq: no options\n");
		return -EINVAL;
	}

	err = nla_parse_nested(tb, TCA_QFQ_MAX, tca[TCA_OPTIONS], qfq_policy);
	if (err < 0)
		return err;

	if (tb[TCA_QFQ_WEIGHT]) {
		weight = nla_get_u32(tb[TCA_QFQ_WEIGHT]);
		if (!weight || weight > (1UL << QFQ_MAX_WSHIFT)) {
			pr_notice("qfq: invalid weight %u\n", weight);
			return -EINVAL;
		}
	} else
		weight = 1;

	if (tb[TCA_QFQ_LMAX]) {
		lmax = nla_get_u32(tb[TCA_QFQ_LMAX]);
		if (lmax < QFQ_MIN_LMAX || lmax > (1UL << QFQ_MTU_SHIFT)) {
			pr_notice("qfq: invalid max length %u\n", lmax);
			return -EINVAL;
		}
	} else
		lmax = psched_mtu(qdisc_dev(sch));

	inv_w = ONE_FP / weight;
	weight = ONE_FP / inv_w;

	if (cl != NULL &&
	    lmax == cl->agg->lmax &&
	    weight == cl->agg->class_weight)
		return 0; /* nothing to change */

	delta_w = weight - (cl ? cl->agg->class_weight : 0);

	if (q->wsum + delta_w > QFQ_MAX_WSUM) {
		pr_notice("qfq: total weight out of range (%d + %u)\n",
			  delta_w, q->wsum);
		return -EINVAL;
	}

	if (cl != NULL) { /* modify existing class */
		if (tca[TCA_RATE]) {
			err = gen_replace_estimator(&cl->bstats, &cl->rate_est,
						    qdisc_root_sleeping_lock(sch),
						    tca[TCA_RATE]);
			if (err)
				return err;
		}
		existing = true;
		goto set_change_agg;
	}

	/* create and init new class */
	cl = kzalloc(sizeof(struct qfq_class), GFP_KERNEL);
	if (cl == NULL)
		return -ENOBUFS;

	cl->refcnt = 1;
	cl->common.classid = classid;
	cl->deficit = lmax;

	cl->qdisc = qdisc_create_dflt(sch->dev_queue,
				      &pfifo_qdisc_ops, classid);
	if (cl->qdisc == NULL)
		cl->qdisc = &noop_qdisc;

	if (tca[TCA_RATE]) {
		err = gen_new_estimator(&cl->bstats, &cl->rate_est,
					qdisc_root_sleeping_lock(sch),
					tca[TCA_RATE]);
		if (err)
			goto destroy_class;
	}

	sch_tree_lock(sch);
	qdisc_class_hash_insert(&q->clhash, &cl->common);
	sch_tree_unlock(sch);

	qdisc_class_hash_grow(sch, &q->clhash);

set_change_agg:
	sch_tree_lock(sch);
	new_agg = qfq_find_agg(q, lmax, weight);
	if (new_agg == NULL) { /* create new aggregate */
		sch_tree_unlock(sch);
		new_agg = kzalloc(sizeof(*new_agg), GFP_KERNEL);
		if (new_agg == NULL) {
			err = -ENOBUFS;
			gen_kill_estimator(&cl->bstats, &cl->rate_est);
			goto destroy_class;
		}
		sch_tree_lock(sch);
		qfq_init_agg(q, new_agg, lmax, weight);
	}
	if (existing)
		qfq_deact_rm_from_agg(q, cl);
	qfq_add_to_agg(q, new_agg, cl);
	sch_tree_unlock(sch);

	*arg = (unsigned long)cl;
	return 0;

destroy_class:
	qdisc_destroy(cl->qdisc);
	kfree(cl);
	return err;
}
static void qfq_destroy_class(struct Qdisc *sch, struct qfq_class *cl)
{
	struct qfq_sched *q = qdisc_priv(sch);

	qfq_rm_from_agg(q, cl);
	gen_kill_estimator(&cl->bstats, &cl->rate_est);
	qdisc_destroy(cl->qdisc);
	kfree(cl);
}
static int qfq_delete_class(struct Qdisc *sch, unsigned long arg)
{
	struct qfq_sched *q = qdisc_priv(sch);
	struct qfq_class *cl = (struct qfq_class *)arg;

	if (cl->filter_cnt > 0)
		return -EBUSY;

	sch_tree_lock(sch);

	qfq_purge_queue(cl);
	qdisc_class_hash_remove(&q->clhash, &cl->common);

	BUG_ON(--cl->refcnt == 0);
	/*
	 * This shouldn't happen: we "hold" one cops->get() when called
	 * from tc_ctl_tclass; the destroy method is done from cops->put().
	 */

	sch_tree_unlock(sch);
	return 0;
}
static unsigned long qfq_get_class(struct Qdisc *sch, u32 classid)
{
	struct qfq_class *cl = qfq_find_class(sch, classid);

	if (cl != NULL)
		cl->refcnt++;

	return (unsigned long)cl;
}
static void qfq_put_class(struct Qdisc *sch, unsigned long arg)
{
	struct qfq_class *cl = (struct qfq_class *)arg;

	if (--cl->refcnt == 0)
		qfq_destroy_class(sch, cl);
}
static struct tcf_proto **qfq_tcf_chain(struct Qdisc *sch, unsigned long cl)
{
	struct qfq_sched *q = qdisc_priv(sch);

	if (cl)
		return NULL;

	return &q->filter_list;
}
static unsigned long qfq_bind_tcf(struct Qdisc *sch, unsigned long parent,
				  u32 classid)
{
	struct qfq_class *cl = qfq_find_class(sch, classid);

	if (cl != NULL)
		cl->filter_cnt++;

	return (unsigned long)cl;
}
static void qfq_unbind_tcf(struct Qdisc *sch, unsigned long arg)
{
	struct qfq_class *cl = (struct qfq_class *)arg;

	cl->filter_cnt--;
}
static int qfq_graft_class(struct Qdisc *sch, unsigned long arg,
			   struct Qdisc *new, struct Qdisc **old)
{
	struct qfq_class *cl = (struct qfq_class *)arg;

	if (new == NULL) {
		new = qdisc_create_dflt(sch->dev_queue,
					&pfifo_qdisc_ops, cl->common.classid);
		if (new == NULL)
			new = &noop_qdisc;
	}

	sch_tree_lock(sch);
	qfq_purge_queue(cl);
	*old = cl->qdisc;
	cl->qdisc = new;
	sch_tree_unlock(sch);
	return 0;
}
static struct Qdisc *qfq_class_leaf(struct Qdisc *sch, unsigned long arg)
{
	struct qfq_class *cl = (struct qfq_class *)arg;

	return cl->qdisc;
}
static int qfq_dump_class(struct Qdisc *sch, unsigned long arg,
			  struct sk_buff *skb, struct tcmsg *tcm)
{
	struct qfq_class *cl = (struct qfq_class *)arg;
	struct nlattr *nest;

	tcm->tcm_parent	= TC_H_ROOT;
	tcm->tcm_handle	= cl->common.classid;
	tcm->tcm_info	= cl->qdisc->handle;

	nest = nla_nest_start(skb, TCA_OPTIONS);
	if (nest == NULL)
		goto nla_put_failure;
	if (nla_put_u32(skb, TCA_QFQ_WEIGHT, cl->agg->class_weight) ||
	    nla_put_u32(skb, TCA_QFQ_LMAX, cl->agg->lmax))
		goto nla_put_failure;
	return nla_nest_end(skb, nest);

nla_put_failure:
	nla_nest_cancel(skb, nest);
	return -EMSGSIZE;
}
static int qfq_dump_class_stats(struct Qdisc *sch, unsigned long arg,
				struct gnet_dump *d)
{
	struct qfq_class *cl = (struct qfq_class *)arg;
	struct tc_qfq_stats xstats;

	memset(&xstats, 0, sizeof(xstats));
	cl->qdisc->qstats.qlen = cl->qdisc->q.qlen;

	xstats.weight = cl->agg->class_weight;
	xstats.lmax = cl->agg->lmax;

	if (gnet_stats_copy_basic(d, &cl->bstats) < 0 ||
	    gnet_stats_copy_rate_est(d, &cl->bstats, &cl->rate_est) < 0 ||
	    gnet_stats_copy_queue(d, &cl->qdisc->qstats) < 0)
		return -1;

	return gnet_stats_copy_app(d, &xstats, sizeof(xstats));
}
static void qfq_walk(struct Qdisc *sch, struct qdisc_walker *arg)
{
	struct qfq_sched *q = qdisc_priv(sch);
	struct qfq_class *cl;
	struct hlist_node *n;
	unsigned int i;

	if (arg->stop)
		return;

	for (i = 0; i < q->clhash.hashsize; i++) {
		hlist_for_each_entry(cl, n, &q->clhash.hash[i], common.hnode) {
			if (arg->count < arg->skip) {
				arg->count++;
				continue;
			}
			if (arg->fn(sch, (unsigned long)cl, arg) < 0) {
				arg->stop = 1;
				return;
			}
			arg->count++;
		}
	}
}
static struct qfq_class *qfq_classify(struct sk_buff *skb, struct Qdisc *sch,
				      int *qerr)
{
	struct qfq_sched *q = qdisc_priv(sch);
	struct qfq_class *cl;
	struct tcf_result res;
	int result;

	if (TC_H_MAJ(skb->priority ^ sch->handle) == 0) {
		pr_debug("qfq_classify: found %d\n", skb->priority);
		cl = qfq_find_class(sch, skb->priority);
		if (cl != NULL)
			return cl;
	}

	*qerr = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
	result = tc_classify(skb, q->filter_list, &res);
	if (result >= 0) {
#ifdef CONFIG_NET_CLS_ACT
		switch (result) {
		case TC_ACT_QUEUED:
		case TC_ACT_STOLEN:
			*qerr = NET_XMIT_SUCCESS | __NET_XMIT_STOLEN;
		case TC_ACT_SHOT:
			return NULL;
		}
#endif
		cl = (struct qfq_class *)res.class;
		if (cl == NULL)
			cl = qfq_find_class(sch, res.classid);
		return cl;
	}

	return NULL;
}
/* Generic comparison function, handling wraparound. */
static inline int qfq_gt(u64 a, u64 b)
{
	return (s64)(a - b) > 0;
}
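
/*
 * Example (illustrative): qfq_gt(1ULL, ~0ULL) is true, because
 * 1 - 0xffffffffffffffff equals 2 modulo 2^64, so a timestamp that has
 * just wrapped around still compares as "later" than one near the top
 * of the 64-bit range.
 */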
/* Round a precise timestamp to its slotted value. */
static inline u64 qfq_round_down(u64 ts, unsigned int shift)
{
	return ts & ~((1ULL << shift) - 1);
}
/* return the pointer to the group with lowest index in the bitmap */
static inline struct qfq_group *qfq_ffs(struct qfq_sched *q,
					unsigned long bitmap)
{
	int index = __ffs(bitmap);
	return &q->groups[index];
}
/* Calculate a mask to mimic what would be ffs_from(). */
static inline unsigned long mask_from(unsigned long bitmap, int from)
{
	return bitmap & ~((1UL << from) - 1);
}
/*
 * The state computation relies on ER=0, IR=1, EB=2, IB=3
 * First compute eligibility comparing grp->S, q->V,
 * then check if someone is blocking us and possibly add EB
 */
static int qfq_calc_state(struct qfq_sched *q, const struct qfq_group *grp)
{
	/* if S > V we are not eligible */
	unsigned int state = qfq_gt(grp->S, q->V);
	unsigned long mask = mask_from(q->bitmaps[ER], grp->index);
	struct qfq_group *next;

	if (mask) {
		next = qfq_ffs(q, mask);
		if (qfq_gt(grp->F, next->F))
			state |= EB;
	}

	return state;
}
/*
 * In principle
 *	q->bitmaps[dst] |= q->bitmaps[src] & mask;
 *	q->bitmaps[src] &= ~mask;
 * but we should make sure that src != dst
 */
static inline void qfq_move_groups(struct qfq_sched *q, unsigned long mask,
				   int src, int dst)
{
	q->bitmaps[dst] |= q->bitmaps[src] & mask;
	q->bitmaps[src] &= ~mask;
}
static void qfq_unblock_groups(struct qfq_sched *q, int index, u64 old_F)
{
	unsigned long mask = mask_from(q->bitmaps[ER], index + 1);
	struct qfq_group *next;

	if (mask) {
		next = qfq_ffs(q, mask);
		if (!qfq_gt(next->F, old_F))
			return;
	}

	mask = (1UL << index) - 1;
	qfq_move_groups(q, mask, EB, ER);
	qfq_move_groups(q, mask, IB, IR);
}
/*
 * perhaps
 *
	old_V ^= q->V;
	old_V >>= q->min_slot_shift;
	if (old_V) {
		...
	}
 *
 */
static void qfq_make_eligible(struct qfq_sched *q)
{
	unsigned long vslot = q->V >> q->min_slot_shift;
	unsigned long old_vslot = q->oldV >> q->min_slot_shift;

	if (vslot != old_vslot) {
		unsigned long mask = (1UL << fls(vslot ^ old_vslot)) - 1;
		qfq_move_groups(q, mask, IR, ER);
		qfq_move_groups(q, mask, IB, EB);
	}
}
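
/*
 * Example (illustrative): if V crosses from vslot 5 (0b101) to
 * vslot 6 (0b110), then vslot ^ old_vslot = 0b011 and fls() = 2, so
 * mask = 0b11: groups 0 and 1 become eligible and move from IR/IB to
 * ER/EB.
 */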
/*
 * The index of the slot in which the aggregate is to be inserted must
 * not be higher than QFQ_MAX_SLOTS-2. There is a '-2' and not a '-1'
 * because the start time of the group may be moved backward by one
 * slot after the aggregate has been inserted, and this would cause
 * non-empty slots to be right-shifted by one position.
 *
 * If the weight and lmax (max_pkt_size) of the classes do not change,
 * then QFQ+ does meet the above constraint according to the current
 * values of its parameters. In fact, if the weight and lmax of the
 * classes do not change, then, from the theory, QFQ+ guarantees that
 * the slot index is never higher than
 * 2 + QFQ_MAX_AGG_CLASSES * ((1<<QFQ_MTU_SHIFT)/QFQ_MIN_LMAX) *
 * (QFQ_MAX_WEIGHT/QFQ_MAX_WSUM) = 2 + 8 * 128 * (1 / 64) = 18
 *
 * When the weight of a class is increased or the lmax of the class is
 * decreased, a new aggregate with smaller slot size than the original
 * parent aggregate of the class may happen to be activated. The
 * activation of this aggregate should be properly delayed to when the
 * service of the class has finished in the ideal system tracked by
 * QFQ+. If the activation of the aggregate is not delayed to this
 * reference time instant, then this aggregate may be unjustly served
 * before other aggregates waiting for service. This may cause the
 * above bound to the slot index to be violated for some of these
 * unlucky aggregates.
 *
 * Instead of delaying the activation of the new aggregate, which is
 * quite complex, the following inaccurate but simple solution is used:
 * if the slot index is higher than QFQ_MAX_SLOTS-2, then the
 * timestamps of the aggregate are shifted backward so as to let the
 * slot index become equal to QFQ_MAX_SLOTS-2.
 */
static void qfq_slot_insert(struct qfq_group *grp, struct qfq_aggregate *agg,
			    u64 roundedS)
{
	u64 slot = (roundedS - grp->S) >> grp->slot_shift;
	unsigned int i; /* slot index in the bucket list */

	if (unlikely(slot > QFQ_MAX_SLOTS - 2)) {
		u64 deltaS = roundedS - grp->S -
			((u64)(QFQ_MAX_SLOTS - 2)<<grp->slot_shift);
		agg->S -= deltaS;
		agg->F -= deltaS;
		slot = QFQ_MAX_SLOTS - 2;
	}

	i = (grp->front + slot) % QFQ_MAX_SLOTS;

	hlist_add_head(&agg->next, &grp->slots[i]);
	__set_bit(slot, &grp->full_slots);
}
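
/*
 * Example (illustrative): with grp->slot_shift = 25, an aggregate
 * whose roundedS exceeds grp->S by 3<<25 is inserted at slot 3, i.e.
 * in bucket (grp->front + 3) % QFQ_MAX_SLOTS.
 */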
/* Maybe introduce hlist_first_entry?? */
static struct qfq_aggregate *qfq_slot_head(struct qfq_group *grp)
{
	return hlist_entry(grp->slots[grp->front].first,
			   struct qfq_aggregate, next);
}
/*
 * remove the entry from the slot
 */
static void qfq_front_slot_remove(struct qfq_group *grp)
{
	struct qfq_aggregate *agg = qfq_slot_head(grp);

	BUG_ON(!agg);
	hlist_del(&agg->next);
	if (hlist_empty(&grp->slots[grp->front]))
		__clear_bit(0, &grp->full_slots);
}
/*
 * Returns the first aggregate in the first non-empty bucket of the
 * group. As a side effect, adjusts the bucket list so the first
 * non-empty bucket is at position 0 in full_slots.
 */
static struct qfq_aggregate *qfq_slot_scan(struct qfq_group *grp)
{
	unsigned int i;

	pr_debug("qfq slot_scan: grp %u full %#lx\n",
		 grp->index, grp->full_slots);

	if (grp->full_slots == 0)
		return NULL;

	i = __ffs(grp->full_slots);  /* zero based */
	if (i > 0) {
		grp->front = (grp->front + i) % QFQ_MAX_SLOTS;
		grp->full_slots >>= i;
	}

	return qfq_slot_head(grp);
}
/*
 * adjust the bucket list. When the start time of a group decreases,
 * we move the index down (modulo QFQ_MAX_SLOTS) so we don't need to
 * move the objects. The mask of occupied slots must be shifted
 * because we use ffs() to find the first non-empty slot.
 * This covers decreases in the group's start time, but what about
 * increases of the start time ?
 * Here too we should make sure that i is less than 32
 */
static void qfq_slot_rotate(struct qfq_group *grp, u64 roundedS)
{
	unsigned int i = (grp->S - roundedS) >> grp->slot_shift;

	grp->full_slots <<= i;
	grp->front = (grp->front - i) % QFQ_MAX_SLOTS;
}
static void qfq_update_eligible(struct qfq_sched *q)
{
	struct qfq_group *grp;
	unsigned long ineligible;

	ineligible = q->bitmaps[IR] | q->bitmaps[IB];
	if (ineligible) {
		if (!q->bitmaps[ER]) {
			grp = qfq_ffs(q, ineligible);
			if (qfq_gt(grp->S, q->V))
				q->V = grp->S;
		}
		qfq_make_eligible(q);
	}
}
/* Dequeue head packet of the head class in the DRR queue of the aggregate. */
static void agg_dequeue(struct qfq_aggregate *agg,
			struct qfq_class *cl, unsigned int len)
{
	qdisc_dequeue_peeked(cl->qdisc);

	cl->deficit -= (int) len;

	if (cl->qdisc->q.qlen == 0) /* no more packets, remove from list */
		list_del(&cl->alist);
	else if (cl->deficit < qdisc_pkt_len(cl->qdisc->ops->peek(cl->qdisc))) {
		cl->deficit += agg->lmax;
		list_move_tail(&cl->alist, &agg->active);
	}
}
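
/*
 * Example (illustrative): with agg->lmax = 1500, a class whose deficit
 * drops to 300 after this dequeue, and whose next packet is 1000
 * bytes, gets deficit 300 + 1500 = 1800 and moves to the tail of the
 * DRR list before it may transmit again.
 */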
static inline struct sk_buff *qfq_peek_skb(struct qfq_aggregate *agg,
					   struct qfq_class **cl,
					   unsigned int *len)
{
	struct sk_buff *skb;

	*cl = list_first_entry(&agg->active, struct qfq_class, alist);
	skb = (*cl)->qdisc->ops->peek((*cl)->qdisc);
	if (skb == NULL)
		WARN_ONCE(1, "qfq_dequeue: non-workconserving leaf\n");
	else
		*len = qdisc_pkt_len(skb);

	return skb;
}
/* Update F according to the actual service received by the aggregate. */
static inline void charge_actual_service(struct qfq_aggregate *agg)
{
	/* compute the service received by the aggregate */
	u32 service_received = agg->initial_budget - agg->budget;

	agg->F = agg->S + (u64)service_received * agg->inv_w;
}
static struct sk_buff *qfq_dequeue(struct Qdisc *sch)
{
	struct qfq_sched *q = qdisc_priv(sch);
	struct qfq_aggregate *in_serv_agg = q->in_serv_agg;
	struct qfq_class *cl;
	struct sk_buff *skb = NULL;
	/* next-packet len, 0 means no more active classes in in-service agg */
	unsigned int len = 0;

	if (in_serv_agg == NULL)
		return NULL;

	if (!list_empty(&in_serv_agg->active))
		skb = qfq_peek_skb(in_serv_agg, &cl, &len);

	/*
	 * If there are no active classes in the in-service aggregate,
	 * or if the aggregate has not enough budget to serve its next
	 * class, then choose the next aggregate to serve.
	 */
	if (len == 0 || in_serv_agg->budget < len) {
		charge_actual_service(in_serv_agg);

		/* recharge the budget of the aggregate */
		in_serv_agg->initial_budget = in_serv_agg->budget =
			in_serv_agg->budgetmax;

		if (!list_empty(&in_serv_agg->active))
			/*
			 * Still active: reschedule for
			 * service. Possible optimization: if no other
			 * aggregate is active, then there is no point
			 * in rescheduling this aggregate, and we can
			 * just keep it as the in-service one. This
			 * should be however a corner case, and to
			 * handle it, we would need to maintain an
			 * extra num_active_aggs field.
			 */
			qfq_activate_agg(q, in_serv_agg, requeue);
		else if (sch->q.qlen == 0) { /* no aggregate to serve */
			q->in_serv_agg = NULL;
			return NULL;
		}

		/*
		 * If we get here, there are other aggregates queued:
		 * choose the new aggregate to serve.
		 */
		in_serv_agg = q->in_serv_agg = qfq_choose_next_agg(q);
		skb = qfq_peek_skb(in_serv_agg, &cl, &len);
	}
	if (!skb)
		return NULL;

	sch->q.qlen--;
	qdisc_bstats_update(sch, skb);

	agg_dequeue(in_serv_agg, cl, len);
	in_serv_agg->budget -= len;
	q->V += (u64)len * IWSUM;
	pr_debug("qfq dequeue: len %u F %lld now %lld\n",
		 len, (unsigned long long) in_serv_agg->F,
		 (unsigned long long) q->V);

	return skb;
}
static struct qfq_aggregate *qfq_choose_next_agg(struct qfq_sched *q)
{
	struct qfq_group *grp;
	struct qfq_aggregate *agg, *new_front_agg;
	u64 old_F;

	qfq_update_eligible(q);
	q->oldV = q->V;

	if (!q->bitmaps[ER])
		return NULL;

	grp = qfq_ffs(q, q->bitmaps[ER]);
	old_F = grp->F;

	agg = qfq_slot_head(grp);

	/* agg starts to be served, remove it from schedule */
	qfq_front_slot_remove(grp);

	new_front_agg = qfq_slot_scan(grp);

	if (new_front_agg == NULL) /* group is now inactive, remove from ER */
		__clear_bit(grp->index, &q->bitmaps[ER]);
	else {
		u64 roundedS = qfq_round_down(new_front_agg->S,
					      grp->slot_shift);
		unsigned int s;

		if (grp->S == roundedS)
			goto skip_unblock;
		grp->S = roundedS;
		grp->F = roundedS + (2ULL << grp->slot_shift);
		__clear_bit(grp->index, &q->bitmaps[ER]);
		s = qfq_calc_state(q, grp);
		__set_bit(grp->index, &q->bitmaps[s]);
	}

skip_unblock:
	qfq_unblock_groups(q, grp->index, old_F);

	return agg;
}
/*
 * Assign a reasonable start time for a new aggregate in group i.
 * Admissible values for \hat(F) are multiples of \sigma_i
 * no greater than V+\sigma_i . Larger values mean that
 * we had a wraparound so we consider the timestamp to be stale.
 *
 * If F is not stale and F >= V then we set S = F.
 * Otherwise we should assign S = V, but this may violate
 * the ordering in EB (see [2]). So, if we have groups in ER,
 * set S to the F_j of the first group j which would be blocking us.
 * We are guaranteed not to move S backward because
 * otherwise our group i would still be blocked.
 */
static void qfq_update_start(struct qfq_sched *q, struct qfq_aggregate *agg)
{
	unsigned long mask;
	u64 limit, roundedF;
	int slot_shift = agg->grp->slot_shift;

	roundedF = qfq_round_down(agg->F, slot_shift);
	limit = qfq_round_down(q->V, slot_shift) + (1ULL << slot_shift);

	if (!qfq_gt(agg->F, q->V) || qfq_gt(roundedF, limit)) {
		/* timestamp was stale */
		mask = mask_from(q->bitmaps[ER], agg->grp->index);
		if (mask) {
			struct qfq_group *next = qfq_ffs(q, mask);
			if (qfq_gt(roundedF, next->F)) {
				if (qfq_gt(limit, next->F))
					agg->S = next->F;
				else /* preserve timestamp correctness */
					agg->S = limit;
				return;
			}
		}
		agg->S = q->V;
	} else  /* timestamp is not stale */
		agg->S = agg->F;
}
/*
 * Update the timestamps of agg before scheduling/rescheduling it for
 * service. In particular, assign to agg->F its maximum possible
 * value, i.e., the virtual finish time with which the aggregate
 * should be labeled if it used all its budget once in service.
 */
static inline void
qfq_update_agg_ts(struct qfq_sched *q,
		  struct qfq_aggregate *agg, enum update_reason reason)
{
	if (reason != requeue)
		qfq_update_start(q, agg);
	else /* just charge agg for the service received */
		agg->S = agg->F;

	agg->F = agg->S + (u64)agg->budgetmax * agg->inv_w;
}
static void qfq_schedule_agg(struct qfq_sched *, struct qfq_aggregate *);
static int qfq_enqueue(struct sk_buff *skb, struct Qdisc *sch)
{
	struct qfq_sched *q = qdisc_priv(sch);
	struct qfq_class *cl;
	struct qfq_aggregate *agg;
	int err = 0;

	cl = qfq_classify(skb, sch, &err);
	if (cl == NULL) {
		if (err & __NET_XMIT_BYPASS)
			sch->qstats.drops++;
		kfree_skb(skb);
		return err;
	}
	pr_debug("qfq_enqueue: cl = %x\n", cl->common.classid);

	if (unlikely(cl->agg->lmax < qdisc_pkt_len(skb))) {
		pr_debug("qfq: increasing maxpkt from %u to %u for class %u",
			 cl->agg->lmax, qdisc_pkt_len(skb), cl->common.classid);
		err = qfq_change_agg(sch, cl, cl->agg->class_weight,
				     qdisc_pkt_len(skb));
		if (err)
			return err;
	}

	err = qdisc_enqueue(skb, cl->qdisc);
	if (unlikely(err != NET_XMIT_SUCCESS)) {
		pr_debug("qfq_enqueue: enqueue failed %d\n", err);
		if (net_xmit_drop_count(err)) {
			cl->qstats.drops++;
			sch->qstats.drops++;
		}
		return err;
	}

	bstats_update(&cl->bstats, skb);
	++sch->q.qlen;

	agg = cl->agg;
	/* if the queue was not empty, then done here */
	if (cl->qdisc->q.qlen != 1) {
		if (unlikely(skb == cl->qdisc->ops->peek(cl->qdisc)) &&
		    list_first_entry(&agg->active, struct qfq_class, alist)
		    == cl && cl->deficit < qdisc_pkt_len(skb))
			list_move_tail(&cl->alist, &agg->active);

		return err;
	}

	/* schedule class for service within the aggregate */
	cl->deficit = agg->lmax;
	list_add_tail(&cl->alist, &agg->active);

	if (list_first_entry(&agg->active, struct qfq_class, alist) != cl)
		return err; /* aggregate was not empty, nothing else to do */

	/* recharge budget */
	agg->initial_budget = agg->budget = agg->budgetmax;

	qfq_update_agg_ts(q, agg, enqueue);
	if (q->in_serv_agg == NULL)
		q->in_serv_agg = agg;
	else if (agg != q->in_serv_agg)
		qfq_schedule_agg(q, agg);

	return err;
}
/*
 * Schedule aggregate according to its timestamps.
 */
static void qfq_schedule_agg(struct qfq_sched *q, struct qfq_aggregate *agg)
{
	struct qfq_group *grp = agg->grp;
	u64 roundedS;
	int s;

	roundedS = qfq_round_down(agg->S, grp->slot_shift);

	/*
	 * Insert agg in the correct bucket.
	 * If agg->S >= grp->S we don't need to adjust the
	 * bucket list and simply go to the insertion phase.
	 * Otherwise grp->S is decreasing, we must make room
	 * in the bucket list, and also recompute the group state.
	 * Finally, if there were no flows in this group and nobody
	 * was in ER make sure to adjust V.
	 */
	if (grp->full_slots) {
		if (!qfq_gt(grp->S, agg->S))
			goto skip_update;

		/* create a slot for this agg->S */
		qfq_slot_rotate(grp, roundedS);
		/* group was surely ineligible, remove */
		__clear_bit(grp->index, &q->bitmaps[IR]);
		__clear_bit(grp->index, &q->bitmaps[IB]);
	} else if (!q->bitmaps[ER] && qfq_gt(roundedS, q->V))
		q->V = roundedS;

	grp->S = roundedS;
	grp->F = roundedS + (2ULL << grp->slot_shift);
	s = qfq_calc_state(q, grp);
	__set_bit(grp->index, &q->bitmaps[s]);

	pr_debug("qfq enqueue: new state %d %#lx S %lld F %lld V %lld\n",
		 s, q->bitmaps[s],
		 (unsigned long long) agg->S,
		 (unsigned long long) agg->F,
		 (unsigned long long) q->V);

skip_update:
	qfq_slot_insert(grp, agg, roundedS);
}
/* Update agg ts and schedule agg for service */
static void qfq_activate_agg(struct qfq_sched *q, struct qfq_aggregate *agg,
			     enum update_reason reason)
{
	qfq_update_agg_ts(q, agg, reason);
	qfq_schedule_agg(q, agg);
}
static void qfq_slot_remove(struct qfq_sched *q, struct qfq_group *grp,
			    struct qfq_aggregate *agg)
{
	unsigned int i, offset;
	u64 roundedS;

	roundedS = qfq_round_down(agg->S, grp->slot_shift);
	offset = (roundedS - grp->S) >> grp->slot_shift;

	i = (grp->front + offset) % QFQ_MAX_SLOTS;

	hlist_del(&agg->next);
	if (hlist_empty(&grp->slots[i]))
		__clear_bit(offset, &grp->full_slots);
}
/*
 * Called to forcibly deschedule an aggregate. If the aggregate is
 * not in the front bucket, or if the latter has other aggregates in
 * the front bucket, we can simply remove the aggregate with no other
 * side effects.
 * Otherwise we must propagate the event up.
 */
static void qfq_deactivate_agg(struct qfq_sched *q, struct qfq_aggregate *agg)
{
	struct qfq_group *grp = agg->grp;
	unsigned long mask;
	u64 roundedS;
	int s;

	if (agg == q->in_serv_agg) {
		charge_actual_service(agg);
		q->in_serv_agg = qfq_choose_next_agg(q);
		return;
	}

	agg->F = agg->S;
	qfq_slot_remove(q, grp, agg);

	if (!grp->full_slots) {
		__clear_bit(grp->index, &q->bitmaps[IR]);
		__clear_bit(grp->index, &q->bitmaps[EB]);
		__clear_bit(grp->index, &q->bitmaps[IB]);

		if (test_bit(grp->index, &q->bitmaps[ER]) &&
		    !(q->bitmaps[ER] & ~((1UL << grp->index) - 1))) {
			mask = q->bitmaps[ER] & ((1UL << grp->index) - 1);
			if (mask)
				mask = ~((1UL << __fls(mask)) - 1);
			else
				mask = ~0UL;
			qfq_move_groups(q, mask, EB, ER);
			qfq_move_groups(q, mask, IB, IR);
		}
		__clear_bit(grp->index, &q->bitmaps[ER]);
	} else if (hlist_empty(&grp->slots[grp->front])) {
		agg = qfq_slot_scan(grp);
		roundedS = qfq_round_down(agg->S, grp->slot_shift);
		if (grp->S != roundedS) {
			__clear_bit(grp->index, &q->bitmaps[ER]);
			__clear_bit(grp->index, &q->bitmaps[IR]);
			__clear_bit(grp->index, &q->bitmaps[EB]);
			__clear_bit(grp->index, &q->bitmaps[IB]);
			grp->S = roundedS;
			grp->F = roundedS + (2ULL << grp->slot_shift);
			s = qfq_calc_state(q, grp);
			__set_bit(grp->index, &q->bitmaps[s]);
		}
	}

	qfq_update_eligible(q);
}
static void qfq_qlen_notify(struct Qdisc *sch, unsigned long arg)
{
	struct qfq_sched *q = qdisc_priv(sch);
	struct qfq_class *cl = (struct qfq_class *)arg;

	if (cl->qdisc->q.qlen == 0)
		qfq_deactivate_class(q, cl);
}
static unsigned int qfq_drop_from_slot(struct qfq_sched *q,
				       struct hlist_head *slot)
{
	struct qfq_aggregate *agg;
	struct hlist_node *n;
	struct qfq_class *cl;
	unsigned int len;

	hlist_for_each_entry(agg, n, slot, next) {
		list_for_each_entry(cl, &agg->active, alist) {

			if (!cl->qdisc->ops->drop)
				continue;

			len = cl->qdisc->ops->drop(cl->qdisc);
			if (len > 0) {
				if (cl->qdisc->q.qlen == 0)
					qfq_deactivate_class(q, cl);

				return len;
			}
		}
	}
	return 0;
}
static unsigned int qfq_drop(struct Qdisc *sch)
{
	struct qfq_sched *q = qdisc_priv(sch);
	struct qfq_group *grp;
	unsigned int i, j, len;

	for (i = 0; i <= QFQ_MAX_INDEX; i++) {
		grp = &q->groups[i];
		for (j = 0; j < QFQ_MAX_SLOTS; j++) {
			len = qfq_drop_from_slot(q, &grp->slots[j]);
			if (len > 0) {
				sch->q.qlen--;
				return len;
			}
		}
	}

	return 0;
}
static int qfq_init_qdisc(struct Qdisc *sch, struct nlattr *opt)
{
	struct qfq_sched *q = qdisc_priv(sch);
	struct qfq_group *grp;
	int i, j, err;
	u32 max_cl_shift, maxbudg_shift, max_classes;

	err = qdisc_class_hash_init(&q->clhash);
	if (err < 0)
		return err;

	if (qdisc_dev(sch)->tx_queue_len + 1 > QFQ_MAX_AGG_CLASSES)
		max_classes = QFQ_MAX_AGG_CLASSES;
	else
		max_classes = qdisc_dev(sch)->tx_queue_len + 1;
	/* max_cl_shift = floor(log_2(max_classes)) */
	max_cl_shift = __fls(max_classes);
	q->max_agg_classes = 1<<max_cl_shift;

	/* maxbudg_shift = log2(max_len * max_classes_per_agg) */
	maxbudg_shift = QFQ_MTU_SHIFT + max_cl_shift;
	q->min_slot_shift = FRAC_BITS + maxbudg_shift - QFQ_MAX_INDEX;

	for (i = 0; i <= QFQ_MAX_INDEX; i++) {
		grp = &q->groups[i];
		grp->index = i;
		grp->slot_shift = q->min_slot_shift + i;
		for (j = 0; j < QFQ_MAX_SLOTS; j++)
			INIT_HLIST_HEAD(&grp->slots[j]);
	}

	INIT_HLIST_HEAD(&q->nonfull_aggs);

	return 0;
}
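
/*
 * Illustrative instantiation (numbers are assumptions): with a large
 * tx_queue_len, max_classes is capped at QFQ_MAX_AGG_CLASSES = 8, so
 * max_cl_shift = 3, maxbudg_shift = 16 + 3 = 19 and
 * min_slot_shift = 30 + 19 - 24 = 25: group 0 then uses slots of 2^25
 * fixed-point units, and group QFQ_MAX_INDEX uses slots of 2^49.
 */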
static void qfq_reset_qdisc(struct Qdisc *sch)
{
	struct qfq_sched *q = qdisc_priv(sch);
	struct qfq_class *cl;
	struct hlist_node *n;
	unsigned int i;

	for (i = 0; i < q->clhash.hashsize; i++) {
		hlist_for_each_entry(cl, n, &q->clhash.hash[i], common.hnode) {
			if (cl->qdisc->q.qlen > 0)
				qfq_deactivate_class(q, cl);

			qdisc_reset(cl->qdisc);
		}
	}
	sch->q.qlen = 0;
}
static void qfq_destroy_qdisc(struct Qdisc *sch)
{
	struct qfq_sched *q = qdisc_priv(sch);
	struct qfq_class *cl;
	struct hlist_node *n, *next;
	unsigned int i;

	tcf_destroy_chain(&q->filter_list);

	for (i = 0; i < q->clhash.hashsize; i++) {
		hlist_for_each_entry_safe(cl, n, next, &q->clhash.hash[i],
					  common.hnode) {
			qfq_destroy_class(sch, cl);
		}
	}
	qdisc_class_hash_destroy(&q->clhash);
}
static const struct Qdisc_class_ops qfq_class_ops = {
	.change		= qfq_change_class,
	.delete		= qfq_delete_class,
	.get		= qfq_get_class,
	.put		= qfq_put_class,
	.tcf_chain	= qfq_tcf_chain,
	.bind_tcf	= qfq_bind_tcf,
	.unbind_tcf	= qfq_unbind_tcf,
	.graft		= qfq_graft_class,
	.leaf		= qfq_class_leaf,
	.qlen_notify	= qfq_qlen_notify,
	.dump		= qfq_dump_class,
	.dump_stats	= qfq_dump_class_stats,
	.walk		= qfq_walk,
};
static struct Qdisc_ops qfq_qdisc_ops __read_mostly = {
	.cl_ops		= &qfq_class_ops,
	.id		= "qfq",
	.priv_size	= sizeof(struct qfq_sched),
	.enqueue	= qfq_enqueue,
	.dequeue	= qfq_dequeue,
	.peek		= qdisc_peek_dequeued,
	.drop		= qfq_drop,
	.init		= qfq_init_qdisc,
	.reset		= qfq_reset_qdisc,
	.destroy	= qfq_destroy_qdisc,
	.owner		= THIS_MODULE,
};
static int __init qfq_init(void)
{
	return register_qdisc(&qfq_qdisc_ops);
}

static void __exit qfq_exit(void)
{
	unregister_qdisc(&qfq_qdisc_ops);
}

module_init(qfq_init);
module_exit(qfq_exit);
MODULE_LICENSE("GPL");