// SPDX-License-Identifier: GPL-2.0-only
/*
 * net/sched/sch_ets.c		Enhanced Transmission Selection scheduler
 *
 * The Enhanced Transmission Selection scheduler is a classful queuing
 * discipline that merges functionality of PRIO and DRR qdiscs in one scheduler.
 * ETS makes it easy to configure a set of strict and bandwidth-sharing bands to
 * implement the transmission selection described in 802.1Qaz.
 *
 * Although ETS is technically classful, it's not possible to add and remove
 * classes at will. Instead one specifies number of classes, how many are
 * PRIO-like and how many DRR-like, and quanta for the latter.
 *
 * The strict classes, if any, are tried for traffic first: first band 0, if it
 * has no traffic then band 1, etc.
 *
 * When there is no traffic in any of the strict queues, the bandwidth-sharing
 * ones are tried next. Each band is assigned a deficit counter, initialized to
 * "quantum" of that band. ETS maintains a list of active bandwidth-sharing
 * bands whose qdiscs are non-empty. A packet is dequeued from the band at the
 * head of the list if the packet size is smaller or equal to the deficit
 * counter. If the counter is too small, it is increased by "quantum" and the
 * scheduler moves on to the next band in the active list.
 */
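/*
 * As an illustration, a minimal sketch of requesting such a setup from
 * userspace with iproute2 (the device name and handle are assumptions made
 * for the example, not anything this file mandates): two strict bands
 * followed by two bandwidth-sharing bands with quanta 6000 and 4000, i.e.
 * roughly a 60:40 split of the leftover bandwidth:
 *
 *	tc qdisc add dev eth0 root handle 1: \
 *		ets bands 4 strict 2 quanta 6000 4000 priomap 0 1 2 3
 */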
#include <linux/module.h>
#include <net/gen_stats.h>
#include <net/netlink.h>
#include <net/pkt_cls.h>
#include <net/pkt_sched.h>
#include <net/sch_generic.h>
struct ets_class {
	struct list_head alist; /* In struct ets_sched.active. */
	struct Qdisc *qdisc;
	u32 quantum;
	u32 deficit;
	struct gnet_stats_basic_packed bstats;
	struct gnet_stats_queue qstats;
};

struct ets_sched {
	struct list_head active;
	struct tcf_proto __rcu *filter_list;
	struct tcf_block *block;
	unsigned int nbands;
	unsigned int nstrict;
	u8 prio2band[TC_PRIO_MAX + 1];
	struct ets_class classes[TCQ_ETS_MAX_BANDS];
};
static const struct nla_policy ets_policy[TCA_ETS_MAX + 1] = {
	[TCA_ETS_NBANDS] = { .type = NLA_U8 },
	[TCA_ETS_NSTRICT] = { .type = NLA_U8 },
	[TCA_ETS_QUANTA] = { .type = NLA_NESTED },
	[TCA_ETS_PRIOMAP] = { .type = NLA_NESTED },
};
static const struct nla_policy ets_priomap_policy[TCA_ETS_MAX + 1] = {
	[TCA_ETS_PRIOMAP_BAND] = { .type = NLA_U8 },
};
static const struct nla_policy ets_quanta_policy[TCA_ETS_MAX + 1] = {
	[TCA_ETS_QUANTA_BAND] = { .type = NLA_U32 },
};
static const struct nla_policy ets_class_policy[TCA_ETS_MAX + 1] = {
	[TCA_ETS_QUANTA_BAND] = { .type = NLA_U32 },
};
static int ets_quantum_parse(struct Qdisc *sch, const struct nlattr *attr,
			     unsigned int *quantum,
			     struct netlink_ext_ack *extack)
{
	*quantum = nla_get_u32(attr);
	if (!*quantum) {
		NL_SET_ERR_MSG(extack, "ETS quantum cannot be zero");
		return -EINVAL;
	}
	return 0;
}
static struct ets_class *
ets_class_from_arg(struct Qdisc *sch, unsigned long arg)
{
	struct ets_sched *q = qdisc_priv(sch);

	/* Class args are 1-based; band indices into q->classes are 0-based. */
	return &q->classes[arg - 1];
}
static u32 ets_class_id(struct Qdisc *sch, const struct ets_class *cl)
{
	struct ets_sched *q = qdisc_priv(sch);
	int band = cl - q->classes;

	return TC_H_MAKE(sch->handle, band + 1);
}
static void ets_offload_change(struct Qdisc *sch)
{
	struct net_device *dev = qdisc_dev(sch);
	struct ets_sched *q = qdisc_priv(sch);
	struct tc_ets_qopt_offload qopt;
	unsigned int w_psum_prev = 0;
	unsigned int q_psum = 0;
	unsigned int q_sum = 0;
	unsigned int quantum;
	unsigned int w_psum;
	unsigned int weight;
	unsigned int i;

	if (!tc_can_offload(dev) || !dev->netdev_ops->ndo_setup_tc)
		return;

	qopt.command = TC_ETS_REPLACE;
	qopt.handle = sch->handle;
	qopt.parent = sch->parent;
	qopt.replace_params.bands = q->nbands;
	qopt.replace_params.qstats = &sch->qstats;
	memcpy(&qopt.replace_params.priomap,
	       q->prio2band, sizeof(q->prio2band));

	for (i = 0; i < q->nbands; i++)
		q_sum += q->classes[i].quantum;

	for (i = 0; i < q->nbands; i++) {
		quantum = q->classes[i].quantum;
		q_psum += quantum;
		w_psum = quantum ? q_psum * 100 / q_sum : 0;
		weight = w_psum - w_psum_prev;
		w_psum_prev = w_psum;

		qopt.replace_params.quanta[i] = quantum;
		qopt.replace_params.weights[i] = weight;
	}

	dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_QDISC_ETS, &qopt);
}
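/* A worked example of the weight computation above, with illustrative
 * numbers that do not come from this file: for nbands = 5, nstrict = 2
 * and quanta {0, 0, 1000, 2000, 3000}, q_sum is 6000 and the rounded
 * prefix sums w_psum come out as {0, 0, 16, 50, 100}, giving per-band
 * weights {0, 0, 16, 34, 50}. Taking differences of rounded prefix sums
 * makes the weights of the bandwidth-sharing bands add up to exactly 100.
 */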
static void ets_offload_destroy(struct Qdisc *sch)
{
	struct net_device *dev = qdisc_dev(sch);
	struct tc_ets_qopt_offload qopt;

	if (!tc_can_offload(dev) || !dev->netdev_ops->ndo_setup_tc)
		return;

	qopt.command = TC_ETS_DESTROY;
	qopt.handle = sch->handle;
	qopt.parent = sch->parent;
	dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_QDISC_ETS, &qopt);
}
static void ets_offload_graft(struct Qdisc *sch, struct Qdisc *new,
			      struct Qdisc *old, unsigned long arg,
			      struct netlink_ext_ack *extack)
{
	struct net_device *dev = qdisc_dev(sch);
	struct tc_ets_qopt_offload qopt;

	qopt.command = TC_ETS_GRAFT;
	qopt.handle = sch->handle;
	qopt.parent = sch->parent;
	qopt.graft_params.band = arg - 1;
	qopt.graft_params.child_handle = new->handle;

	qdisc_offload_graft_helper(dev, sch, new, old, TC_SETUP_QDISC_ETS,
				   &qopt, extack);
}
static int ets_offload_dump(struct Qdisc *sch)
{
	struct tc_ets_qopt_offload qopt;

	qopt.command = TC_ETS_STATS;
	qopt.handle = sch->handle;
	qopt.parent = sch->parent;
	qopt.stats.bstats = &sch->bstats;
	qopt.stats.qstats = &sch->qstats;

	return qdisc_offload_dump_helper(sch, TC_SETUP_QDISC_ETS, &qopt);
}
static bool ets_class_is_strict(struct ets_sched *q, const struct ets_class *cl)
{
	unsigned int band = cl - q->classes;

	return band < q->nstrict;
}
static int ets_class_change(struct Qdisc *sch, u32 classid, u32 parentid,
			    struct nlattr **tca, unsigned long *arg,
			    struct netlink_ext_ack *extack)
{
	struct ets_class *cl = ets_class_from_arg(sch, *arg);
	struct ets_sched *q = qdisc_priv(sch);
	struct nlattr *opt = tca[TCA_OPTIONS];
	struct nlattr *tb[TCA_ETS_MAX + 1];
	unsigned int quantum;
	int err;

	/* Classes can be added and removed only through Qdisc_ops.change
	 * interface.
	 */
	if (!cl) {
		NL_SET_ERR_MSG(extack, "Fine-grained class addition and removal is not supported");
		return -EOPNOTSUPP;
	}

	if (!opt) {
		NL_SET_ERR_MSG(extack, "ETS options are required for this operation");
		return -EINVAL;
	}

	err = nla_parse_nested(tb, TCA_ETS_MAX, opt, ets_class_policy, extack);
	if (err < 0)
		return err;

	if (!tb[TCA_ETS_QUANTA_BAND])
		/* Nothing to configure. */
		return 0;

	if (ets_class_is_strict(q, cl)) {
		NL_SET_ERR_MSG(extack, "Strict bands do not have a configurable quantum");
		return -EINVAL;
	}

	err = ets_quantum_parse(sch, tb[TCA_ETS_QUANTA_BAND], &quantum,
				extack);
	if (err)
		return err;

	sch_tree_lock(sch);
	cl->quantum = quantum;
	sch_tree_unlock(sch);

	ets_offload_change(sch);
	return 0;
}
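/* For illustration (the device, handle and class ID below are assumed
 * example values, and the exact userspace syntax is iproute2's, not
 * something this file defines): ets_class_change() is what serves a
 * per-band quantum update requested as, e.g.:
 *
 *	tc class change dev eth0 classid 1:4 ets quantum 5000
 *
 * which adjusts band 3 (minor ID 4), provided it is bandwidth-sharing.
 */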
static int ets_class_graft(struct Qdisc *sch, unsigned long arg,
			   struct Qdisc *new, struct Qdisc **old,
			   struct netlink_ext_ack *extack)
{
	struct ets_class *cl = ets_class_from_arg(sch, arg);

	if (!new) {
		new = qdisc_create_dflt(sch->dev_queue, &pfifo_qdisc_ops,
					ets_class_id(sch, cl), NULL);
		if (!new)
			new = &noop_qdisc;
		else
			qdisc_hash_add(new, true);
	}

	*old = qdisc_replace(sch, new, &cl->qdisc);
	ets_offload_graft(sch, new, *old, arg, extack);
	return 0;
}
static struct Qdisc *ets_class_leaf(struct Qdisc *sch, unsigned long arg)
{
	struct ets_class *cl = ets_class_from_arg(sch, arg);

	return cl->qdisc;
}
static unsigned long ets_class_find(struct Qdisc *sch, u32 classid)
{
	unsigned long band = TC_H_MIN(classid);
	struct ets_sched *q = qdisc_priv(sch);

	if (band - 1 >= q->nbands)
		return 0;
	return band;
}
static void ets_class_qlen_notify(struct Qdisc *sch, unsigned long arg)
{
	struct ets_class *cl = ets_class_from_arg(sch, arg);
	struct ets_sched *q = qdisc_priv(sch);

	/* We get notified about zero-length child Qdiscs as well if they are
	 * offloaded. Those aren't on the active list though, so don't attempt
	 * to remove them.
	 */
	if (!ets_class_is_strict(q, cl) && sch->q.qlen)
		list_del(&cl->alist);
}
static int ets_class_dump(struct Qdisc *sch, unsigned long arg,
			  struct sk_buff *skb, struct tcmsg *tcm)
{
	struct ets_class *cl = ets_class_from_arg(sch, arg);
	struct ets_sched *q = qdisc_priv(sch);
	struct nlattr *nest;

	tcm->tcm_parent = TC_H_ROOT;
	tcm->tcm_handle = ets_class_id(sch, cl);
	tcm->tcm_info = cl->qdisc->handle;

	nest = nla_nest_start_noflag(skb, TCA_OPTIONS);
	if (!nest)
		goto nla_put_failure;
	if (!ets_class_is_strict(q, cl)) {
		if (nla_put_u32(skb, TCA_ETS_QUANTA_BAND, cl->quantum))
			goto nla_put_failure;
	}
	return nla_nest_end(skb, nest);

nla_put_failure:
	nla_nest_cancel(skb, nest);
	return -EMSGSIZE;
}
static int ets_class_dump_stats(struct Qdisc *sch, unsigned long arg,
				struct gnet_dump *d)
{
	struct ets_class *cl = ets_class_from_arg(sch, arg);
	struct Qdisc *cl_q = cl->qdisc;

	if (gnet_stats_copy_basic(qdisc_root_sleeping_running(sch),
				  d, NULL, &cl_q->bstats) < 0 ||
	    qdisc_qstats_copy(d, cl_q) < 0)
		return -1;

	return 0;
}
static void ets_qdisc_walk(struct Qdisc *sch, struct qdisc_walker *arg)
{
	struct ets_sched *q = qdisc_priv(sch);
	int i;

	if (arg->stop)
		return;

	for (i = 0; i < q->nbands; i++) {
		if (arg->count < arg->skip) {
			arg->count++;
			continue;
		}
		if (arg->fn(sch, i + 1, arg) < 0) {
			arg->stop = 1;
			break;
		}
		arg->count++;
	}
}
static struct tcf_block *
ets_qdisc_tcf_block(struct Qdisc *sch, unsigned long cl,
		    struct netlink_ext_ack *extack)
{
	struct ets_sched *q = qdisc_priv(sch);

	if (cl) {
		NL_SET_ERR_MSG(extack, "ETS classid must be zero");
		return NULL;
	}

	return q->block;
}
static unsigned long ets_qdisc_bind_tcf(struct Qdisc *sch, unsigned long parent,
					u32 classid)
{
	return ets_class_find(sch, classid);
}
static void ets_qdisc_unbind_tcf(struct Qdisc *sch, unsigned long arg)
{
}
static struct ets_class *ets_classify(struct sk_buff *skb, struct Qdisc *sch,
				      int *qerr)
{
	struct ets_sched *q = qdisc_priv(sch);
	u32 band = skb->priority;
	struct tcf_result res;
	struct tcf_proto *fl;
	int err;

	*qerr = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
	if (TC_H_MAJ(skb->priority) != sch->handle) {
		fl = rcu_dereference_bh(q->filter_list);
		err = tcf_classify(skb, fl, &res, false);
#ifdef CONFIG_NET_CLS_ACT
		switch (err) {
		case TC_ACT_STOLEN:
		case TC_ACT_QUEUED:
		case TC_ACT_TRAP:
			*qerr = NET_XMIT_SUCCESS | __NET_XMIT_STOLEN;
			fallthrough;
		case TC_ACT_SHOT:
			return NULL;
		}
#endif
		if (!fl || err < 0) {
			if (TC_H_MAJ(band))
				band = 0;
			return &q->classes[q->prio2band[band & TC_PRIO_MAX]];
		}
		band = res.classid;
	}
	band = TC_H_MIN(band) - 1;
	if (band >= q->nbands)
		return &q->classes[q->prio2band[0]];
	return &q->classes[band];
}
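/* By way of example (illustrative values): a packet whose skb->priority
 * is TC_H_MAKE(sch->handle, 3) maps directly to band 2 above; when no
 * filter is attached or classification fails, the priomap picks a band
 * from the low bits of skb->priority; and an out-of-range minor number
 * falls back to the band that the priomap assigns to priority 0.
 */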
static int ets_qdisc_enqueue(struct sk_buff *skb, struct Qdisc *sch,
			     struct sk_buff **to_free)
{
	unsigned int len = qdisc_pkt_len(skb);
	struct ets_sched *q = qdisc_priv(sch);
	struct ets_class *cl;
	int err = 0;
	bool first;

	cl = ets_classify(skb, sch, &err);
	if (!cl) {
		if (err & __NET_XMIT_BYPASS)
			qdisc_qstats_drop(sch);
		__qdisc_drop(skb, to_free);
		return err;
	}

	first = !cl->qdisc->q.qlen;
	err = qdisc_enqueue(skb, cl->qdisc, to_free);
	if (unlikely(err != NET_XMIT_SUCCESS)) {
		if (net_xmit_drop_count(err)) {
			cl->qstats.drops++;
			qdisc_qstats_drop(sch);
		}
		return err;
	}

	if (first && !ets_class_is_strict(q, cl)) {
		list_add_tail(&cl->alist, &q->active);
		cl->deficit = cl->quantum;
	}

	sch->qstats.backlog += len;
	sch->q.qlen++;
	return err;
}
static struct sk_buff *
ets_qdisc_dequeue_skb(struct Qdisc *sch, struct sk_buff *skb)
{
	qdisc_bstats_update(sch, skb);
	qdisc_qstats_backlog_dec(sch, skb);
	sch->q.qlen--;
	return skb;
}
static struct sk_buff *ets_qdisc_dequeue(struct Qdisc *sch)
{
	struct ets_sched *q = qdisc_priv(sch);
	struct ets_class *cl;
	struct sk_buff *skb;
	unsigned int band;
	unsigned int len;

	while (1) {
		for (band = 0; band < q->nstrict; band++) {
			cl = &q->classes[band];
			skb = qdisc_dequeue_peeked(cl->qdisc);
			if (skb)
				return ets_qdisc_dequeue_skb(sch, skb);
		}

		if (list_empty(&q->active))
			goto out;

		cl = list_first_entry(&q->active, struct ets_class, alist);
		skb = cl->qdisc->ops->peek(cl->qdisc);
		if (!skb) {
			qdisc_warn_nonwc(__func__, cl->qdisc);
			goto out;
		}

		len = qdisc_pkt_len(skb);
		if (len <= cl->deficit) {
			cl->deficit -= len;
			skb = qdisc_dequeue_peeked(cl->qdisc);
			if (unlikely(!skb))
				goto out;
			if (cl->qdisc->q.qlen == 0)
				list_del(&cl->alist);
			return ets_qdisc_dequeue_skb(sch, skb);
		}

		cl->deficit += cl->quantum;
		list_move_tail(&cl->alist, &q->active);
	}
out:
	return NULL;
}
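/* A short worked trace of the DRR loop above (illustrative numbers):
 * suppose a single active bandwidth-sharing band with quantum 1500 and
 * deficit 1500 holds packets of 1000 and 900 bytes. The first packet
 * fits (1000 <= 1500) and leaves a deficit of 500; the 900-byte packet
 * does not, so the deficit grows to 2000 and the band is rotated to the
 * tail of the active list, from where the loop retries it on the next
 * pass and dequeues it.
 */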
static int ets_qdisc_priomap_parse(struct nlattr *priomap_attr,
				   unsigned int nbands, u8 *priomap,
				   struct netlink_ext_ack *extack)
{
	const struct nlattr *attr;
	int prio = 0;
	u8 band;
	int rem;
	int err;

	err = __nla_validate_nested(priomap_attr, TCA_ETS_MAX,
				    ets_priomap_policy, NL_VALIDATE_STRICT,
				    extack);
	if (err)
		return err;

	nla_for_each_nested(attr, priomap_attr, rem) {
		switch (nla_type(attr)) {
		case TCA_ETS_PRIOMAP_BAND:
			if (prio > TC_PRIO_MAX) {
				NL_SET_ERR_MSG_MOD(extack, "Too many priorities in ETS priomap");
				return -EINVAL;
			}
			band = nla_get_u8(attr);
			if (band >= nbands) {
				NL_SET_ERR_MSG_MOD(extack, "Invalid band number in ETS priomap");
				return -EINVAL;
			}
			priomap[prio++] = band;
			break;
		default:
			WARN_ON_ONCE(1); /* Validate should have caught this. */
			return -EINVAL;
		}
	}

	return 0;
}
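/* For example (an illustrative payload, not one taken from this file):
 * a priomap of {3, 3, 2, 2} maps priorities 0 and 1 to band 3 and
 * priorities 2 and 3 to band 2, while priorities with no value supplied
 * keep the default chosen in ets_qdisc_change(), i.e. the last band.
 */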
static int ets_qdisc_quanta_parse(struct Qdisc *sch, struct nlattr *quanta_attr,
				  unsigned int nbands, unsigned int nstrict,
				  unsigned int *quanta,
				  struct netlink_ext_ack *extack)
{
	const struct nlattr *attr;
	int band = nstrict;
	int rem;
	int err;

	err = __nla_validate_nested(quanta_attr, TCA_ETS_MAX,
				    ets_quanta_policy, NL_VALIDATE_STRICT,
				    extack);
	if (err < 0)
		return err;

	nla_for_each_nested(attr, quanta_attr, rem) {
		switch (nla_type(attr)) {
		case TCA_ETS_QUANTA_BAND:
			if (band >= nbands) {
				NL_SET_ERR_MSG_MOD(extack, "ETS quanta has more values than bands");
				return -EINVAL;
			}
			err = ets_quantum_parse(sch, attr, &quanta[band++],
						extack);
			if (err)
				return err;
			break;
		default:
			WARN_ON_ONCE(1); /* Validate should have caught this. */
			return -EINVAL;
		}
	}

	return 0;
}
static int ets_qdisc_change(struct Qdisc *sch, struct nlattr *opt,
			    struct netlink_ext_ack *extack)
{
	unsigned int quanta[TCQ_ETS_MAX_BANDS] = {0};
	struct Qdisc *queues[TCQ_ETS_MAX_BANDS];
	struct ets_sched *q = qdisc_priv(sch);
	struct nlattr *tb[TCA_ETS_MAX + 1];
	unsigned int oldbands = q->nbands;
	u8 priomap[TC_PRIO_MAX + 1];
	unsigned int nstrict = 0;
	unsigned int nbands;
	unsigned int i;
	int err;

	if (!opt) {
		NL_SET_ERR_MSG(extack, "ETS options are required for this operation");
		return -EINVAL;
	}

	err = nla_parse_nested(tb, TCA_ETS_MAX, opt, ets_policy, extack);
	if (err < 0)
		return err;

	if (!tb[TCA_ETS_NBANDS]) {
		NL_SET_ERR_MSG_MOD(extack, "Number of bands is a required argument");
		return -EINVAL;
	}
	nbands = nla_get_u8(tb[TCA_ETS_NBANDS]);
	if (nbands < 1 || nbands > TCQ_ETS_MAX_BANDS) {
		NL_SET_ERR_MSG_MOD(extack, "Invalid number of bands");
		return -EINVAL;
	}
	/* Unless overridden, traffic goes to the last band. */
	memset(priomap, nbands - 1, sizeof(priomap));

	if (tb[TCA_ETS_NSTRICT]) {
		nstrict = nla_get_u8(tb[TCA_ETS_NSTRICT]);
		if (nstrict > nbands) {
			NL_SET_ERR_MSG_MOD(extack, "Invalid number of strict bands");
			return -EINVAL;
		}
	}

	if (tb[TCA_ETS_PRIOMAP]) {
		err = ets_qdisc_priomap_parse(tb[TCA_ETS_PRIOMAP],
					      nbands, priomap, extack);
		if (err)
			return err;
	}

	if (tb[TCA_ETS_QUANTA]) {
		err = ets_qdisc_quanta_parse(sch, tb[TCA_ETS_QUANTA],
					     nbands, nstrict, quanta, extack);
		if (err)
			return err;
	}
	/* If there are more bands than strict + quanta provided, the remaining
	 * ones are ETS with quantum of MTU. Initialize the missing values here.
	 */
	for (i = nstrict; i < nbands; i++) {
		if (!quanta[i])
			quanta[i] = psched_mtu(qdisc_dev(sch));
	}

	/* Before commit, make sure we can allocate all new qdiscs */
	for (i = oldbands; i < nbands; i++) {
		queues[i] = qdisc_create_dflt(sch->dev_queue, &pfifo_qdisc_ops,
					      ets_class_id(sch, &q->classes[i]),
					      extack);
		if (!queues[i]) {
			while (i > oldbands)
				qdisc_put(queues[--i]);
			return -ENOMEM;
		}
	}

	sch_tree_lock(sch);
	q->nbands = nbands;
	q->nstrict = nstrict;
	memcpy(q->prio2band, priomap, sizeof(priomap));

	for (i = q->nbands; i < oldbands; i++)
		qdisc_tree_flush_backlog(q->classes[i].qdisc);

	for (i = 0; i < q->nbands; i++)
		q->classes[i].quantum = quanta[i];

	for (i = oldbands; i < q->nbands; i++) {
		q->classes[i].qdisc = queues[i];
		if (q->classes[i].qdisc != &noop_qdisc)
			qdisc_hash_add(q->classes[i].qdisc, true);
	}

	sch_tree_unlock(sch);

	ets_offload_change(sch);
	for (i = q->nbands; i < oldbands; i++) {
		qdisc_put(q->classes[i].qdisc);
		memset(&q->classes[i], 0, sizeof(q->classes[i]));
	}
	return 0;
}
static int ets_qdisc_init(struct Qdisc *sch, struct nlattr *opt,
			  struct netlink_ext_ack *extack)
{
	struct ets_sched *q = qdisc_priv(sch);
	int err;

	if (!opt)
		return -EINVAL;

	err = tcf_block_get(&q->block, &q->filter_list, sch, extack);
	if (err)
		return err;

	INIT_LIST_HEAD(&q->active);
	return ets_qdisc_change(sch, opt, extack);
}
static void ets_qdisc_reset(struct Qdisc *sch)
{
	struct ets_sched *q = qdisc_priv(sch);
	int band;

	for (band = q->nstrict; band < q->nbands; band++) {
		if (q->classes[band].qdisc->q.qlen)
			list_del(&q->classes[band].alist);
	}
	for (band = 0; band < q->nbands; band++)
		qdisc_reset(q->classes[band].qdisc);
	sch->qstats.backlog = 0;
	sch->q.qlen = 0;
}
static void ets_qdisc_destroy(struct Qdisc *sch)
{
	struct ets_sched *q = qdisc_priv(sch);
	int band;

	ets_offload_destroy(sch);
	tcf_block_put(q->block);
	for (band = 0; band < q->nbands; band++)
		qdisc_put(q->classes[band].qdisc);
}
static int ets_qdisc_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	struct ets_sched *q = qdisc_priv(sch);
	struct nlattr *opts;
	struct nlattr *nest;
	int band;
	int prio;
	int err;

	err = ets_offload_dump(sch);
	if (err)
		return err;

	opts = nla_nest_start_noflag(skb, TCA_OPTIONS);
	if (!opts)
		goto nla_err;

	if (nla_put_u8(skb, TCA_ETS_NBANDS, q->nbands))
		goto nla_err;

	if (q->nstrict &&
	    nla_put_u8(skb, TCA_ETS_NSTRICT, q->nstrict))
		goto nla_err;

	if (q->nbands > q->nstrict) {
		nest = nla_nest_start(skb, TCA_ETS_QUANTA);
		if (!nest)
			goto nla_err;

		for (band = q->nstrict; band < q->nbands; band++) {
			if (nla_put_u32(skb, TCA_ETS_QUANTA_BAND,
					q->classes[band].quantum))
				goto nla_err;
		}

		nla_nest_end(skb, nest);
	}

	nest = nla_nest_start(skb, TCA_ETS_PRIOMAP);
	if (!nest)
		goto nla_err;

	for (prio = 0; prio <= TC_PRIO_MAX; prio++) {
		if (nla_put_u8(skb, TCA_ETS_PRIOMAP_BAND, q->prio2band[prio]))
			goto nla_err;
	}

	nla_nest_end(skb, nest);

	return nla_nest_end(skb, opts);

nla_err:
	nla_nest_cancel(skb, opts);
	return -EMSGSIZE;
}
static const struct Qdisc_class_ops ets_class_ops = {
	.change		= ets_class_change,
	.graft		= ets_class_graft,
	.leaf		= ets_class_leaf,
	.find		= ets_class_find,
	.qlen_notify	= ets_class_qlen_notify,
	.dump		= ets_class_dump,
	.dump_stats	= ets_class_dump_stats,
	.walk		= ets_qdisc_walk,
	.tcf_block	= ets_qdisc_tcf_block,
	.bind_tcf	= ets_qdisc_bind_tcf,
	.unbind_tcf	= ets_qdisc_unbind_tcf,
};
static struct Qdisc_ops ets_qdisc_ops __read_mostly = {
	.cl_ops		= &ets_class_ops,
	.id		= "ets",
	.priv_size	= sizeof(struct ets_sched),
	.enqueue	= ets_qdisc_enqueue,
	.dequeue	= ets_qdisc_dequeue,
	.peek		= qdisc_peek_dequeued,
	.change		= ets_qdisc_change,
	.init		= ets_qdisc_init,
	.reset		= ets_qdisc_reset,
	.destroy	= ets_qdisc_destroy,
	.dump		= ets_qdisc_dump,
	.owner		= THIS_MODULE,
};
static int __init ets_init(void)
{
	return register_qdisc(&ets_qdisc_ops);
}

static void __exit ets_exit(void)
{
	unregister_qdisc(&ets_qdisc_ops);
}

module_init(ets_init);
module_exit(ets_exit);
MODULE_LICENSE("GPL");