/*
 * net/sched/sch_sfq.c	Stochastic Fairness Queueing discipline.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 */
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/jiffies.h>
#include <linux/string.h>
#include <linux/in.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/skbuff.h>
#include <linux/jhash.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <net/netlink.h>
#include <net/pkt_sched.h>
#include <net/pkt_cls.h>
#include <net/red.h>
/* Stochastic Fairness Queuing algorithm.
   =======================================

   Source:
   Paul E. McKenney "Stochastic Fairness Queuing",
   IEEE INFOCOM'90 Proceedings, San Francisco, 1990.

   Paul E. McKenney "Stochastic Fairness Queuing",
   "Interworking: Research and Experience", v.2, 1991, p.113-131.

   See also:
   M. Shreedhar and George Varghese "Efficient Fair
   Queuing using Deficit Round Robin", Proc. SIGCOMM 95.

   This is not the thing that is usually called (W)FQ nowadays.
   It does not use any timestamp mechanism, but instead
   processes queues in round-robin order.

   ADVANTAGE:

   - It is very cheap. Both CPU and memory requirements are minimal.

   DRAWBACKS:

   - "Stochastic" -> It is not 100% fair.
   When hash collisions occur, several flows are considered as one.

   - "Round-robin" -> It introduces larger delays than virtual clock
   based schemes, and should not be used for isolating interactive
   traffic from non-interactive traffic. This means that this scheduler
   should be used as a leaf of CBQ or P3, which put interactive traffic
   to the higher priority band.

   We still need true WFQ for the top-level CSZ, but using WFQ
   for best-effort traffic is absolutely pointless:
   SFQ is superior for this purpose.

   This implementation limits:
   - maximal queue length per flow to 127 packets.
   - number of hash buckets to 65536.

   It is easy to increase these values, but not in flight.  */
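
/* Example configuration (illustrative only; the device name and handles
 * are assumed):
 *
 *	tc qdisc add dev eth0 root handle 1: prio bands 3
 *	tc qdisc add dev eth0 parent 1:3 handle 30: sfq perturb 10
 *
 * This attaches SFQ as a leaf on the lowest-priority band of a prio
 * qdisc and re-perturbs the hash every 10 seconds.
 */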
#define SFQ_MAX_DEPTH		127 /* max number of packets per flow */
#define SFQ_DEFAULT_FLOWS	128
#define SFQ_MAX_FLOWS		(0x10000 - SFQ_MAX_DEPTH - 1) /* max number of flows */
#define SFQ_EMPTY_SLOT		0xffff
#define SFQ_DEFAULT_HASH_DIVISOR 1024
/* We use 16 bits to store allot, and want to handle packets up to 64K.
 * Scale allot by 8 (1<<3) so that no overflow occurs.
 */
#define SFQ_ALLOT_SHIFT		3
#define SFQ_ALLOT_SIZE(X)	DIV_ROUND_UP(X, 1 << SFQ_ALLOT_SHIFT)
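
/* Example: SFQ_ALLOT_SIZE(65536) = DIV_ROUND_UP(65536, 8) = 8192, so even
 * a 64K packet stays well within the 16-bit allot field.
 */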
/* This type should contain at least SFQ_MAX_DEPTH + 1 + SFQ_MAX_FLOWS values */
typedef u16 sfq_index;
/*
 * We don't use pointers to save space.
 * Small indexes [0 ... SFQ_MAX_FLOWS - 1] are 'pointers' to slots[] array,
 * while the following values [SFQ_MAX_FLOWS ... SFQ_MAX_FLOWS + SFQ_MAX_DEPTH]
 * are 'pointers' to dep[] array.
 */
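/* Example: with SFQ_MAX_DEPTH = 127, SFQ_MAX_FLOWS is 65408; index 5 means
 * slots[5], while index 65409 means dep[65409 - 65408] = dep[1].
 */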
struct sfq_head {
	sfq_index	next;
	sfq_index	prev;
};

struct sfq_slot {
	struct sk_buff	*skblist_next;
	struct sk_buff	*skblist_prev;
	sfq_index	qlen;		/* number of skbs in skblist */
	sfq_index	next;		/* next slot in sfq RR chain */
	struct sfq_head dep;		/* anchor in dep[] chains */
	unsigned short	hash;		/* hash value (index in ht[]) */
	short		allot;		/* credit for this slot */

	unsigned int	backlog;
	struct red_vars vars;
};
struct sfq_sched_data {
/* frequently used fields */
	int		limit;		/* limit of total number of packets in this qdisc */
	unsigned int	divisor;	/* number of slots in hash table */
	u8		headdrop;
	u8		maxdepth;	/* limit of packets per flow */

	u32		perturbation;
	u8		cur_depth;	/* depth of longest slot */
	u8		flags;
	unsigned short	scaled_quantum;	/* SFQ_ALLOT_SIZE(quantum) */
	struct tcf_proto __rcu *filter_list;
	sfq_index	*ht;		/* Hash table ('divisor' slots) */
	struct sfq_slot	*slots;		/* Flows table ('maxflows' entries) */

	struct red_parms *red_parms;
	struct tc_sfqred_stats stats;
	struct sfq_slot *tail;		/* current slot in round */

	struct sfq_head	dep[SFQ_MAX_DEPTH + 1];
	/* Linked lists of slots, indexed by depth
	 * dep[0] : list of unused flows
	 * dep[1] : list of flows with 1 packet
	 * dep[X] : list of flows with X packets
	 */

	unsigned int	maxflows;	/* number of flows in flows array */
	int		perturb_period;
	unsigned int	quantum;	/* Allotment per round: MUST BE >= MTU */
	struct timer_list perturb_timer;
};
/*
 * sfq_head are either in a sfq_slot or in dep[] array
 */
static inline struct sfq_head *sfq_dep_head(struct sfq_sched_data *q, sfq_index val)
{
	if (val < SFQ_MAX_FLOWS)
		return &q->slots[val].dep;
	return &q->dep[val - SFQ_MAX_FLOWS];
}
static unsigned int sfq_hash(const struct sfq_sched_data *q,
			     const struct sk_buff *skb)
{
	return skb_get_hash_perturb(skb, q->perturbation) & (q->divisor - 1);
}
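
/* q->divisor is validated to be a power of two (see sfq_change()), so
 * masking with (divisor - 1) is equivalent to taking the hash modulo the
 * hash table size.
 */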
static unsigned int sfq_classify(struct sk_buff *skb, struct Qdisc *sch,
				 int *qerr)
{
	struct sfq_sched_data *q = qdisc_priv(sch);
	struct tcf_result res;
	struct tcf_proto *fl;
	int result;

	if (TC_H_MAJ(skb->priority) == sch->handle &&
	    TC_H_MIN(skb->priority) > 0 &&
	    TC_H_MIN(skb->priority) <= q->divisor)
		return TC_H_MIN(skb->priority);

	fl = rcu_dereference_bh(q->filter_list);
	if (!fl)
		return sfq_hash(q, skb) + 1;

	*qerr = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
	result = tc_classify(skb, fl, &res, false);
	if (result >= 0) {
#ifdef CONFIG_NET_CLS_ACT
		switch (result) {
		case TC_ACT_STOLEN:
		case TC_ACT_QUEUED:
			*qerr = NET_XMIT_SUCCESS | __NET_XMIT_STOLEN;
		case TC_ACT_SHOT:
			return 0;
		}
#endif
		if (TC_H_MIN(res.classid) <= q->divisor)
			return TC_H_MIN(res.classid);
	}
	return 0;
}
/*
 * x : slot number [0 .. SFQ_MAX_FLOWS - 1]
 */
static inline void sfq_link(struct sfq_sched_data *q, sfq_index x)
{
	sfq_index p, n;
	struct sfq_slot *slot = &q->slots[x];
	int qlen = slot->qlen;

	p = qlen + SFQ_MAX_FLOWS;
	n = q->dep[qlen].next;

	slot->dep.next = n;
	slot->dep.prev = p;

	q->dep[qlen].next = x;		/* sfq_dep_head(q, p)->next = x */
	sfq_dep_head(q, n)->prev = x;
}
#define sfq_unlink(q, x, n, p)			\
	do {					\
		n = q->slots[x].dep.next;	\
		p = q->slots[x].dep.prev;	\
		sfq_dep_head(q, p)->next = n;	\
		sfq_dep_head(q, n)->prev = p;	\
	} while (0)
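
/* sfq_unlink() detaches slot x from its current depth list, returning its
 * old neighbours in n and p; sfq_dec()/sfq_inc() below then adjust qlen
 * and call sfq_link() to splice the slot into the list for its new depth.
 */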
static inline void sfq_dec(struct sfq_sched_data *q, sfq_index x)
{
	sfq_index p, n;
	int d;

	sfq_unlink(q, x, n, p);

	d = q->slots[x].qlen--;
	if (n == p && q->cur_depth == d)
		q->cur_depth--;
	sfq_link(q, x);
}
static inline void sfq_inc(struct sfq_sched_data *q, sfq_index x)
{
	sfq_index p, n;
	int d;

	sfq_unlink(q, x, n, p);

	d = ++q->slots[x].qlen;
	if (q->cur_depth < d)
		q->cur_depth = d;
	sfq_link(q, x);
}
/* helper functions : might be changed when/if skb uses a standard list_head */
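
/* The slot's skblist_next/skblist_prev fields are laid out like the next/prev
 * pointers at the start of struct sk_buff, so the slot itself, cast to
 * (struct sk_buff *), acts as the sentinel head of its own circular list:
 * an empty list simply points back at the slot and no NULL checks are needed.
 */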
/* remove one skb from tail of slot queue */
static inline struct sk_buff *slot_dequeue_tail(struct sfq_slot *slot)
{
	struct sk_buff *skb = slot->skblist_prev;

	slot->skblist_prev = skb->prev;
	skb->prev->next = (struct sk_buff *)slot;
	skb->next = skb->prev = NULL;
	return skb;
}
/* remove one skb from head of slot queue */
static inline struct sk_buff *slot_dequeue_head(struct sfq_slot *slot)
{
	struct sk_buff *skb = slot->skblist_next;

	slot->skblist_next = skb->next;
	skb->next->prev = (struct sk_buff *)slot;
	skb->next = skb->prev = NULL;
	return skb;
}
static inline void slot_queue_init(struct sfq_slot *slot)
{
	memset(slot, 0, sizeof(*slot));
	slot->skblist_prev = slot->skblist_next = (struct sk_buff *)slot;
}
/* add skb to slot queue (tail add) */
static inline void slot_queue_add(struct sfq_slot *slot, struct sk_buff *skb)
{
	skb->prev = slot->skblist_prev;
	skb->next = (struct sk_buff *)slot;
	slot->skblist_prev->next = skb;
	slot->skblist_prev = skb;
}
static unsigned int sfq_drop(struct Qdisc *sch)
{
	struct sfq_sched_data *q = qdisc_priv(sch);
	sfq_index x, d = q->cur_depth;
	struct sk_buff *skb;
	unsigned int len;
	struct sfq_slot *slot;

	/* Queue is full! Find the longest slot and drop tail packet from it */
	if (d > 1) {
		x = q->dep[d].next;
		slot = &q->slots[x];
drop:
		skb = q->headdrop ? slot_dequeue_head(slot) : slot_dequeue_tail(slot);
		len = qdisc_pkt_len(skb);
		slot->backlog -= len;
		sfq_dec(q, x);
		sch->q.qlen--;
		qdisc_qstats_drop(sch);
		qdisc_qstats_backlog_dec(sch, skb);
		kfree_skb(skb);
		return len;
	}

	if (d == 1) {
		/* It is difficult to believe, but ALL THE SLOTS HAVE LENGTH 1. */
		x = q->tail->next;
		slot = &q->slots[x];
		q->tail->next = slot->next;
		q->ht[slot->hash] = SFQ_EMPTY_SLOT;
		goto drop;
	}

	return 0;
}
/* Is ECN parameter configured */
static int sfq_prob_mark(const struct sfq_sched_data *q)
{
	return q->flags & TC_RED_ECN;
}
/* Should packets over max threshold just be marked */
static int sfq_hard_mark(const struct sfq_sched_data *q)
{
	return (q->flags & (TC_RED_ECN | TC_RED_HARDDROP)) == TC_RED_ECN;
}
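
/* Flag combinations, as implied by the two helpers above: TC_RED_ECN alone
 * marks at both the probabilistic and the hard threshold, while
 * TC_RED_ECN | TC_RED_HARDDROP marks probabilistically but still drops
 * packets above the max threshold.
 */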
static int sfq_headdrop(const struct sfq_sched_data *q)
{
	return q->headdrop;
}
static int
sfq_enqueue(struct sk_buff *skb, struct Qdisc *sch, struct sk_buff **to_free)
{
	struct sfq_sched_data *q = qdisc_priv(sch);
	unsigned int hash, dropped;
	sfq_index x, qlen;
	struct sfq_slot *slot;
	int uninitialized_var(ret);
	struct sk_buff *head;
	int delta;

	hash = sfq_classify(skb, sch, &ret);
	if (hash == 0) {
		if (ret & __NET_XMIT_BYPASS)
			qdisc_qstats_drop(sch);
		__qdisc_drop(skb, to_free);
		return ret;
	}
	hash--;

	x = q->ht[hash];
	slot = &q->slots[x];
	if (x == SFQ_EMPTY_SLOT) {
		x = q->dep[0].next; /* get a free slot */
		if (x >= SFQ_MAX_FLOWS)
			return qdisc_drop(skb, sch, to_free);
		q->ht[hash] = x;
		slot = &q->slots[x];
		slot->hash = hash;
		slot->backlog = 0; /* should already be 0 anyway... */
		red_set_vars(&slot->vars);
		goto enqueue;
	}
	if (q->red_parms) {
		slot->vars.qavg = red_calc_qavg_no_idle_time(q->red_parms,
							&slot->vars,
							slot->backlog);
		switch (red_action(q->red_parms,
				   &slot->vars,
				   slot->vars.qavg)) {
		case RED_DONT_MARK:
			break;

		case RED_PROB_MARK:
			qdisc_qstats_overlimit(sch);
			if (sfq_prob_mark(q)) {
				/* We know we have at least one packet in queue */
				if (sfq_headdrop(q) &&
				    INET_ECN_set_ce(slot->skblist_next)) {
					q->stats.prob_mark_head++;
					break;
				}
				if (INET_ECN_set_ce(skb)) {
					q->stats.prob_mark++;
					break;
				}
			}
			q->stats.prob_drop++;
			goto congestion_drop;

		case RED_HARD_MARK:
			qdisc_qstats_overlimit(sch);
			if (sfq_hard_mark(q)) {
				/* We know we have at least one packet in queue */
				if (sfq_headdrop(q) &&
				    INET_ECN_set_ce(slot->skblist_next)) {
					q->stats.forced_mark_head++;
					break;
				}
				if (INET_ECN_set_ce(skb)) {
					q->stats.forced_mark++;
					break;
				}
			}
			q->stats.forced_drop++;
			goto congestion_drop;
		}
	}

	if (slot->qlen >= q->maxdepth) {
congestion_drop:
		if (!sfq_headdrop(q))
			return qdisc_drop(skb, sch, to_free);

		/* We know we have at least one packet in queue */
		head = slot_dequeue_head(slot);
		delta = qdisc_pkt_len(head) - qdisc_pkt_len(skb);
		sch->qstats.backlog -= delta;
		slot->backlog -= delta;
		qdisc_drop(head, sch, to_free);

		slot_queue_add(slot, skb);
		qdisc_tree_reduce_backlog(sch, 0, delta);
		return NET_XMIT_CN;
	}

enqueue:
	qdisc_qstats_backlog_inc(sch, skb);
	slot->backlog += qdisc_pkt_len(skb);
	slot_queue_add(slot, skb);
	sfq_inc(q, x);
	if (slot->qlen == 1) {		/* The flow is new */
		if (q->tail == NULL) {	/* It is the first flow */
			slot->next = x;
		} else {
			slot->next = q->tail->next;
			q->tail->next = x;
		}
		/* We put this flow at the end of our flow list.
		 * This might sound unfair for a new flow to wait after old ones,
		 * but we could end up servicing new flows only, and freeze old ones.
		 */
		q->tail = slot;
		/* We could use a bigger initial quantum for new flows */
		slot->allot = q->scaled_quantum;
	}
	if (++sch->q.qlen <= q->limit)
		return NET_XMIT_SUCCESS;

	qlen = slot->qlen;
	dropped = sfq_drop(sch);
	/* Return Congestion Notification only if we dropped a packet
	 * from this flow.
	 */
	if (qlen != slot->qlen)
		return NET_XMIT_CN;

	/* As we dropped a packet, better let upper stack know this */
	qdisc_tree_reduce_backlog(sch, 1, dropped);
	return NET_XMIT_SUCCESS;
}
static struct sk_buff *
sfq_dequeue(struct Qdisc *sch)
{
	struct sfq_sched_data *q = qdisc_priv(sch);
	struct sk_buff *skb;
	sfq_index a, next_a;
	struct sfq_slot *slot;

	/* No active slots */
	if (q->tail == NULL)
		return NULL;

next_slot:
	a = q->tail->next;
	slot = &q->slots[a];
	if (slot->allot <= 0) {
		q->tail = slot;
		slot->allot += q->scaled_quantum;
		goto next_slot;
	}
	skb = slot_dequeue_head(slot);
	sfq_dec(q, a);
	qdisc_bstats_update(sch, skb);
	sch->q.qlen--;
	qdisc_qstats_backlog_dec(sch, skb);
	slot->backlog -= qdisc_pkt_len(skb);
	/* Is the slot empty? */
	if (slot->qlen == 0) {
		q->ht[slot->hash] = SFQ_EMPTY_SLOT;
		next_a = slot->next;
		if (a == next_a) {
			q->tail = NULL; /* no more active slots */
			return skb;
		}
		q->tail->next = next_a;
	} else {
		slot->allot -= SFQ_ALLOT_SIZE(qdisc_pkt_len(skb));
	}
	return skb;
}
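
/* The allot handling above is the deficit round robin part: each dequeued
 * packet debits SFQ_ALLOT_SIZE(len) from the slot, and a slot whose allot
 * has gone non-positive is moved to the back of the round (q->tail = slot)
 * and topped up with scaled_quantum before it is served again, so each
 * flow averages one quantum of bytes per round.
 */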
static void
sfq_reset(struct Qdisc *sch)
{
	struct sk_buff *skb;

	while ((skb = sfq_dequeue(sch)) != NULL)
		rtnl_kfree_skbs(skb, skb);
}
/*
 * When q->perturbation is changed, we rehash all queued skbs
 * to avoid OOO (Out Of Order) effects.
 * We don't use sfq_dequeue()/sfq_enqueue() because we don't want to change
 * counters.
 */
static void sfq_rehash(struct Qdisc *sch)
{
	struct sfq_sched_data *q = qdisc_priv(sch);
	struct sk_buff *skb;
	int i;
	struct sfq_slot *slot;
	struct sk_buff_head list;
	int dropped = 0;
	unsigned int drop_len = 0;

	__skb_queue_head_init(&list);

	for (i = 0; i < q->maxflows; i++) {
		slot = &q->slots[i];
		if (!slot->qlen)
			continue;
		while (slot->qlen) {
			skb = slot_dequeue_head(slot);
			sfq_dec(q, i);
			__skb_queue_tail(&list, skb);
		}
		slot->backlog = 0;
		red_set_vars(&slot->vars);
		q->ht[slot->hash] = SFQ_EMPTY_SLOT;
	}
	q->tail = NULL;

	while ((skb = __skb_dequeue(&list)) != NULL) {
		unsigned int hash = sfq_hash(q, skb);
		sfq_index x = q->ht[hash];

		slot = &q->slots[x];
		if (x == SFQ_EMPTY_SLOT) {
			x = q->dep[0].next; /* get a free slot */
			if (x >= SFQ_MAX_FLOWS) {
drop:
				qdisc_qstats_backlog_dec(sch, skb);
				drop_len += qdisc_pkt_len(skb);
				kfree_skb(skb);
				dropped++;
				continue;
			}
			q->ht[hash] = x;
			slot = &q->slots[x];
			slot->hash = hash;
		}
		if (slot->qlen >= q->maxdepth)
			goto drop;
		slot_queue_add(slot, skb);
		if (q->red_parms)
			slot->vars.qavg = red_calc_qavg(q->red_parms,
							&slot->vars,
							slot->backlog);
		slot->backlog += qdisc_pkt_len(skb);
		sfq_inc(q, x);
		if (slot->qlen == 1) {		/* The flow is new */
			if (q->tail == NULL) {	/* It is the first flow */
				slot->next = x;
			} else {
				slot->next = q->tail->next;
				q->tail->next = x;
			}
			q->tail = slot;
			slot->allot = q->scaled_quantum;
		}
	}
	sch->q.qlen -= dropped;
	qdisc_tree_reduce_backlog(sch, dropped, drop_len);
}
static void sfq_perturbation(unsigned long arg)
{
	struct Qdisc *sch = (struct Qdisc *)arg;
	struct sfq_sched_data *q = qdisc_priv(sch);
	spinlock_t *root_lock = qdisc_lock(qdisc_root_sleeping(sch));

	spin_lock(root_lock);
	q->perturbation = prandom_u32();
	if (!q->filter_list && q->tail)
		sfq_rehash(sch);
	spin_unlock(root_lock);

	if (q->perturb_period)
		mod_timer(&q->perturb_timer, jiffies + q->perturb_period);
}
static int sfq_change(struct Qdisc *sch, struct nlattr *opt)
{
	struct sfq_sched_data *q = qdisc_priv(sch);
	struct tc_sfq_qopt *ctl = nla_data(opt);
	struct tc_sfq_qopt_v1 *ctl_v1 = NULL;
	unsigned int qlen, dropped = 0;
	struct red_parms *p = NULL;

	if (opt->nla_len < nla_attr_size(sizeof(*ctl)))
		return -EINVAL;
	if (opt->nla_len >= nla_attr_size(sizeof(*ctl_v1)))
		ctl_v1 = nla_data(opt);
	if (ctl->divisor &&
	    (!is_power_of_2(ctl->divisor) || ctl->divisor > 65536))
		return -EINVAL;
	if (ctl_v1 && ctl_v1->qth_min) {
		p = kmalloc(sizeof(*p), GFP_KERNEL);
		if (!p)
			return -ENOMEM;
	}
	sch_tree_lock(sch);
	if (ctl->quantum) {
		q->quantum = ctl->quantum;
		q->scaled_quantum = SFQ_ALLOT_SIZE(q->quantum);
	}
	q->perturb_period = ctl->perturb_period * HZ;
	if (ctl->flows)
		q->maxflows = min_t(u32, ctl->flows, SFQ_MAX_FLOWS);
	if (ctl->divisor) {
		q->divisor = ctl->divisor;
		q->maxflows = min_t(u32, q->maxflows, q->divisor);
	}
	if (ctl_v1) {
		if (ctl_v1->depth)
			q->maxdepth = min_t(u32, ctl_v1->depth, SFQ_MAX_DEPTH);
		if (p) {
			swap(q->red_parms, p);
			red_set_parms(q->red_parms,
				      ctl_v1->qth_min, ctl_v1->qth_max,
				      ctl_v1->Wlog,
				      ctl_v1->Plog, ctl_v1->Scell_log,
				      NULL,
				      ctl_v1->max_P);
		}
		q->flags = ctl_v1->flags;
		q->headdrop = ctl_v1->headdrop;
	}
	if (ctl->limit) {
		q->limit = min_t(u32, ctl->limit, q->maxdepth * q->maxflows);
		q->maxflows = min_t(u32, q->maxflows, q->limit);
	}

	qlen = sch->q.qlen;
	while (sch->q.qlen > q->limit)
		dropped += sfq_drop(sch);
	qdisc_tree_reduce_backlog(sch, qlen - sch->q.qlen, dropped);

	del_timer(&q->perturb_timer);
	if (q->perturb_period) {
		mod_timer(&q->perturb_timer, jiffies + q->perturb_period);
		q->perturbation = prandom_u32();
	}
	sch_tree_unlock(sch);
	kfree(p);
	return 0;
}
static void *sfq_alloc(size_t sz)
{
	void *ptr = kmalloc(sz, GFP_KERNEL | __GFP_NOWARN);

	if (!ptr)
		ptr = vmalloc(sz);
	return ptr;
}
static void sfq_free(void *addr)
{
	kvfree(addr);
}
static void sfq_destroy(struct Qdisc *sch)
{
	struct sfq_sched_data *q = qdisc_priv(sch);

	tcf_destroy_chain(&q->filter_list);
	q->perturb_period = 0;
	del_timer_sync(&q->perturb_timer);
	sfq_free(q->ht);
	sfq_free(q->slots);
	kfree(q->red_parms);
}
static int sfq_init(struct Qdisc *sch, struct nlattr *opt)
{
	struct sfq_sched_data *q = qdisc_priv(sch);
	int i;

	q->perturb_timer.function = sfq_perturbation;
	q->perturb_timer.data = (unsigned long)sch;
	init_timer_deferrable(&q->perturb_timer);

	for (i = 0; i < SFQ_MAX_DEPTH + 1; i++) {
		q->dep[i].next = i + SFQ_MAX_FLOWS;
		q->dep[i].prev = i + SFQ_MAX_FLOWS;
	}
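	/* With the index encoding, i + SFQ_MAX_FLOWS is dep[i] itself, so
	 * every depth list above starts out empty, pointing back at its own
	 * head.
	 */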

	q->limit = SFQ_MAX_DEPTH;
	q->maxdepth = SFQ_MAX_DEPTH;
	q->cur_depth = 0;
	q->tail = NULL;
	q->divisor = SFQ_DEFAULT_HASH_DIVISOR;
	q->maxflows = SFQ_DEFAULT_FLOWS;
	q->quantum = psched_mtu(qdisc_dev(sch));
	q->scaled_quantum = SFQ_ALLOT_SIZE(q->quantum);
	q->perturb_period = 0;
	q->perturbation = prandom_u32();

	if (opt) {
		int err = sfq_change(sch, opt);
		if (err)
			return err;
	}

	q->ht = sfq_alloc(sizeof(q->ht[0]) * q->divisor);
	q->slots = sfq_alloc(sizeof(q->slots[0]) * q->maxflows);
	if (!q->ht || !q->slots) {
		/* Note: sfq_destroy() will be called by our caller */
		return -ENOMEM;
	}

	for (i = 0; i < q->divisor; i++)
		q->ht[i] = SFQ_EMPTY_SLOT;

	for (i = 0; i < q->maxflows; i++) {
		slot_queue_init(&q->slots[i]);
		sfq_link(q, i);
	}
	if (q->limit >= 1)
		sch->flags |= TCQ_F_CAN_BYPASS;
	else
		sch->flags &= ~TCQ_F_CAN_BYPASS;
	return 0;
}
static int sfq_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	struct sfq_sched_data *q = qdisc_priv(sch);
	unsigned char *b = skb_tail_pointer(skb);
	struct tc_sfq_qopt_v1 opt;
	struct red_parms *p = q->red_parms;

	memset(&opt, 0, sizeof(opt));
	opt.v0.quantum	= q->quantum;
	opt.v0.perturb_period = q->perturb_period / HZ;
	opt.v0.limit	= q->limit;
	opt.v0.divisor	= q->divisor;
	opt.v0.flows	= q->maxflows;
	opt.depth	= q->maxdepth;
	opt.headdrop	= q->headdrop;

	if (p) {
		opt.qth_min	= p->qth_min >> p->Wlog;
		opt.qth_max	= p->qth_max >> p->Wlog;
		opt.Wlog	= p->Wlog;
		opt.Plog	= p->Plog;
		opt.Scell_log	= p->Scell_log;
		opt.max_P	= p->max_P;
	}
	memcpy(&opt.stats, &q->stats, sizeof(opt.stats));
	opt.flags	= q->flags;

	if (nla_put(skb, TCA_OPTIONS, sizeof(opt), &opt))
		goto nla_put_failure;

	return skb->len;

nla_put_failure:
	nlmsg_trim(skb, b);
	return -1;
}
static struct Qdisc *sfq_leaf(struct Qdisc *sch, unsigned long arg)
{
	return NULL;
}
static unsigned long sfq_get(struct Qdisc *sch, u32 classid)
{
	return 0;
}
static unsigned long sfq_bind(struct Qdisc *sch, unsigned long parent,
			      u32 classid)
{
	/* we cannot bypass queue discipline anymore */
	sch->flags &= ~TCQ_F_CAN_BYPASS;
	return 0;
}
static void sfq_put(struct Qdisc *q, unsigned long cl)
{
}
static struct tcf_proto __rcu **sfq_find_tcf(struct Qdisc *sch,
					     unsigned long cl)
{
	struct sfq_sched_data *q = qdisc_priv(sch);

	if (cl)
		return NULL;
	return &q->filter_list;
}
static int sfq_dump_class(struct Qdisc *sch, unsigned long cl,
			  struct sk_buff *skb, struct tcmsg *tcm)
{
	tcm->tcm_handle |= TC_H_MIN(cl);
	return 0;
}
static int sfq_dump_class_stats(struct Qdisc *sch, unsigned long cl,
				struct gnet_dump *d)
{
	struct sfq_sched_data *q = qdisc_priv(sch);
	sfq_index idx = q->ht[cl - 1];
	struct gnet_stats_queue qs = { 0 };
	struct tc_sfq_xstats xstats = { 0 };

	if (idx != SFQ_EMPTY_SLOT) {
		const struct sfq_slot *slot = &q->slots[idx];

		xstats.allot = slot->allot << SFQ_ALLOT_SHIFT;
		qs.qlen = slot->qlen;
		qs.backlog = slot->backlog;
	}
	if (gnet_stats_copy_queue(d, NULL, &qs, qs.qlen) < 0)
		return -1;
	return gnet_stats_copy_app(d, &xstats, sizeof(xstats));
}
static void sfq_walk(struct Qdisc *sch, struct qdisc_walker *arg)
{
	struct sfq_sched_data *q = qdisc_priv(sch);
	unsigned int i;

	if (arg->stop)
		return;

	for (i = 0; i < q->divisor; i++) {
		if (q->ht[i] == SFQ_EMPTY_SLOT ||
		    arg->count < arg->skip) {
			arg->count++;
			continue;
		}
		if (arg->fn(sch, i + 1, arg) < 0) {
			arg->stop = 1;
			break;
		}
		arg->count++;
	}
}
static const struct Qdisc_class_ops sfq_class_ops = {
	.leaf		=	sfq_leaf,
	.get		=	sfq_get,
	.put		=	sfq_put,
	.tcf_chain	=	sfq_find_tcf,
	.bind_tcf	=	sfq_bind,
	.unbind_tcf	=	sfq_put,
	.dump		=	sfq_dump_class,
	.dump_stats	=	sfq_dump_class_stats,
	.walk		=	sfq_walk,
};
static struct Qdisc_ops sfq_qdisc_ops __read_mostly = {
	.cl_ops		=	&sfq_class_ops,
	.id		=	"sfq",
	.priv_size	=	sizeof(struct sfq_sched_data),
	.enqueue	=	sfq_enqueue,
	.dequeue	=	sfq_dequeue,
	.peek		=	qdisc_peek_dequeued,
	.init		=	sfq_init,
	.reset		=	sfq_reset,
	.destroy	=	sfq_destroy,
	.change		=	NULL,
	.dump		=	sfq_dump,
	.owner		=	THIS_MODULE,
};
static int __init sfq_module_init(void)
{
	return register_qdisc(&sfq_qdisc_ops);
}

static void __exit sfq_module_exit(void)
{
	unregister_qdisc(&sfq_qdisc_ops);
}

module_init(sfq_module_init)
module_exit(sfq_module_exit)
MODULE_LICENSE("GPL");