// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * net/sched/sch_cbs.c	Credit Based Shaper
 *
 * Authors:	Vinicius Costa Gomes <vinicius.gomes@intel.com>
 */

/* Credit Based Shaper (CBS)
 * =========================
 *
 * This is a simple rate-limiting shaper aimed at TSN applications on
 * systems with known traffic workloads.
 *
 * Its algorithm is defined by the IEEE 802.1Q-2014 Specification,
 * Section 8.6.8.2, and explained in more detail in the Annex L of the
 * same specification.
 *
 * There are four tunables to be considered:
 *
 *	'idleslope': Idleslope is the rate of credits that is
 *	accumulated (in kilobits per second) when there is at least
 *	one packet waiting for transmission. Packets are transmitted
 *	when the current value of credits is equal or greater than
 *	zero. When there is no packet to be transmitted the amount of
 *	credits is set to zero. This is the main tunable of the CBS
 *	algorithm.
 *
 *	'sendslope':
 *	Sendslope is the rate of credits that is depleted (it should be a
 *	negative number of kilobits per second) when a transmission is
 *	occurring. It can be calculated as follows, (IEEE 802.1Q-2014 Section
 *	8.6.8.2 item g):
 *
 *	sendslope = idleslope - port_transmit_rate
 *
 *	'hicredit': Hicredit defines the maximum amount of credits (in
 *	bytes) that can be accumulated. Hicredit depends on the
 *	characteristics of interfering traffic,
 *	'max_interference_size' is the maximum size of any burst of
 *	traffic that can delay the transmission of a frame that is
 *	available for transmission for this traffic class, (IEEE
 *	802.1Q-2014 Annex L, Equation L-3):
 *
 *	hicredit = max_interference_size * (idleslope / port_transmit_rate)
 *
 *	'locredit': Locredit is the minimum amount of credits that can
 *	be reached. It is a function of the traffic flowing through
 *	this qdisc (IEEE 802.1Q-2014 Annex L, Equation L-2):
 *
 *	locredit = max_frame_size * (sendslope / port_transmit_rate)
 */
53 #include <linux/ethtool.h>
54 #include <linux/module.h>
55 #include <linux/types.h>
56 #include <linux/kernel.h>
57 #include <linux/string.h>
58 #include <linux/errno.h>
59 #include <linux/skbuff.h>
60 #include <linux/units.h>
62 #include <net/netevent.h>
63 #include <net/netlink.h>
64 #include <net/sch_generic.h>
65 #include <net/pkt_sched.h>
/* All active cbs qdisc instances; walked from the netdevice notifier so
 * each instance can refresh its cached port rate on link changes.
 */
static LIST_HEAD(cbs_list);
/* Protects insertions into, deletions from, and walks of cbs_list. */
static DEFINE_SPINLOCK(cbs_list_lock);
70 struct cbs_sched_data
{
73 atomic64_t port_rate
; /* in bytes/s */
74 s64 last
; /* timestamp in ns */
75 s64 credits
; /* in bytes */
76 s32 locredit
; /* in bytes */
77 s32 hicredit
; /* in bytes */
78 s64 sendslope
; /* in bytes/s */
79 s64 idleslope
; /* in bytes/s */
80 struct qdisc_watchdog watchdog
;
81 int (*enqueue
)(struct sk_buff
*skb
, struct Qdisc
*sch
,
82 struct sk_buff
**to_free
);
83 struct sk_buff
*(*dequeue
)(struct Qdisc
*sch
);
85 struct list_head cbs_list
;
88 static int cbs_child_enqueue(struct sk_buff
*skb
, struct Qdisc
*sch
,
90 struct sk_buff
**to_free
)
92 unsigned int len
= qdisc_pkt_len(skb
);
95 err
= child
->ops
->enqueue(skb
, child
, to_free
);
96 if (err
!= NET_XMIT_SUCCESS
)
99 sch
->qstats
.backlog
+= len
;
102 return NET_XMIT_SUCCESS
;
105 static int cbs_enqueue_offload(struct sk_buff
*skb
, struct Qdisc
*sch
,
106 struct sk_buff
**to_free
)
108 struct cbs_sched_data
*q
= qdisc_priv(sch
);
109 struct Qdisc
*qdisc
= q
->qdisc
;
111 return cbs_child_enqueue(skb
, sch
, qdisc
, to_free
);
114 static int cbs_enqueue_soft(struct sk_buff
*skb
, struct Qdisc
*sch
,
115 struct sk_buff
**to_free
)
117 struct cbs_sched_data
*q
= qdisc_priv(sch
);
118 struct Qdisc
*qdisc
= q
->qdisc
;
120 if (sch
->q
.qlen
== 0 && q
->credits
> 0) {
121 /* We need to stop accumulating credits when there's
122 * no enqueued packets and q->credits is positive.
125 q
->last
= ktime_get_ns();
128 return cbs_child_enqueue(skb
, sch
, qdisc
, to_free
);
131 static int cbs_enqueue(struct sk_buff
*skb
, struct Qdisc
*sch
,
132 struct sk_buff
**to_free
)
134 struct cbs_sched_data
*q
= qdisc_priv(sch
);
136 return q
->enqueue(skb
, sch
, to_free
);
139 /* timediff is in ns, slope is in bytes/s */
140 static s64
timediff_to_credits(s64 timediff
, s64 slope
)
142 return div64_s64(timediff
* slope
, NSEC_PER_SEC
);
145 static s64
delay_from_credits(s64 credits
, s64 slope
)
147 if (unlikely(slope
== 0))
150 return div64_s64(-credits
* NSEC_PER_SEC
, slope
);
153 static s64
credits_from_len(unsigned int len
, s64 slope
, s64 port_rate
)
155 if (unlikely(port_rate
== 0))
158 return div64_s64(len
* slope
, port_rate
);
161 static struct sk_buff
*cbs_child_dequeue(struct Qdisc
*sch
, struct Qdisc
*child
)
165 skb
= child
->ops
->dequeue(child
);
169 qdisc_qstats_backlog_dec(sch
, skb
);
170 qdisc_bstats_update(sch
, skb
);
176 static struct sk_buff
*cbs_dequeue_soft(struct Qdisc
*sch
)
178 struct cbs_sched_data
*q
= qdisc_priv(sch
);
179 struct Qdisc
*qdisc
= q
->qdisc
;
180 s64 now
= ktime_get_ns();
185 /* The previous packet is still being sent */
187 qdisc_watchdog_schedule_ns(&q
->watchdog
, q
->last
);
190 if (q
->credits
< 0) {
191 credits
= timediff_to_credits(now
- q
->last
, q
->idleslope
);
193 credits
= q
->credits
+ credits
;
194 q
->credits
= min_t(s64
, credits
, q
->hicredit
);
196 if (q
->credits
< 0) {
199 delay
= delay_from_credits(q
->credits
, q
->idleslope
);
200 qdisc_watchdog_schedule_ns(&q
->watchdog
, now
+ delay
);
207 skb
= cbs_child_dequeue(sch
, qdisc
);
211 len
= qdisc_pkt_len(skb
);
213 /* As sendslope is a negative number, this will decrease the
214 * amount of q->credits.
216 credits
= credits_from_len(len
, q
->sendslope
,
217 atomic64_read(&q
->port_rate
));
218 credits
+= q
->credits
;
220 q
->credits
= max_t(s64
, credits
, q
->locredit
);
221 /* Estimate of the transmission of the last byte of the packet in ns */
222 if (unlikely(atomic64_read(&q
->port_rate
) == 0))
225 q
->last
= now
+ div64_s64(len
* NSEC_PER_SEC
,
226 atomic64_read(&q
->port_rate
));
231 static struct sk_buff
*cbs_dequeue_offload(struct Qdisc
*sch
)
233 struct cbs_sched_data
*q
= qdisc_priv(sch
);
234 struct Qdisc
*qdisc
= q
->qdisc
;
236 return cbs_child_dequeue(sch
, qdisc
);
239 static struct sk_buff
*cbs_dequeue(struct Qdisc
*sch
)
241 struct cbs_sched_data
*q
= qdisc_priv(sch
);
243 return q
->dequeue(sch
);
246 static const struct nla_policy cbs_policy
[TCA_CBS_MAX
+ 1] = {
247 [TCA_CBS_PARMS
] = { .len
= sizeof(struct tc_cbs_qopt
) },
250 static void cbs_disable_offload(struct net_device
*dev
,
251 struct cbs_sched_data
*q
)
253 struct tc_cbs_qopt_offload cbs
= { };
254 const struct net_device_ops
*ops
;
260 q
->enqueue
= cbs_enqueue_soft
;
261 q
->dequeue
= cbs_dequeue_soft
;
263 ops
= dev
->netdev_ops
;
264 if (!ops
->ndo_setup_tc
)
267 cbs
.queue
= q
->queue
;
270 err
= ops
->ndo_setup_tc(dev
, TC_SETUP_QDISC_CBS
, &cbs
);
272 pr_warn("Couldn't disable CBS offload for queue %d\n",
276 static int cbs_enable_offload(struct net_device
*dev
, struct cbs_sched_data
*q
,
277 const struct tc_cbs_qopt
*opt
,
278 struct netlink_ext_ack
*extack
)
280 const struct net_device_ops
*ops
= dev
->netdev_ops
;
281 struct tc_cbs_qopt_offload cbs
= { };
284 if (!ops
->ndo_setup_tc
) {
285 NL_SET_ERR_MSG(extack
, "Specified device does not support cbs offload");
289 cbs
.queue
= q
->queue
;
292 cbs
.hicredit
= opt
->hicredit
;
293 cbs
.locredit
= opt
->locredit
;
294 cbs
.idleslope
= opt
->idleslope
;
295 cbs
.sendslope
= opt
->sendslope
;
297 err
= ops
->ndo_setup_tc(dev
, TC_SETUP_QDISC_CBS
, &cbs
);
299 NL_SET_ERR_MSG(extack
, "Specified device failed to setup cbs hardware offload");
303 q
->enqueue
= cbs_enqueue_offload
;
304 q
->dequeue
= cbs_dequeue_offload
;
309 static void cbs_set_port_rate(struct net_device
*dev
, struct cbs_sched_data
*q
)
311 struct ethtool_link_ksettings ecmd
;
312 int speed
= SPEED_10
;
316 err
= __ethtool_get_link_ksettings(dev
, &ecmd
);
320 if (ecmd
.base
.speed
&& ecmd
.base
.speed
!= SPEED_UNKNOWN
)
321 speed
= ecmd
.base
.speed
;
324 port_rate
= speed
* 1000 * BYTES_PER_KBIT
;
326 atomic64_set(&q
->port_rate
, port_rate
);
327 netdev_dbg(dev
, "cbs: set %s's port_rate to: %lld, linkspeed: %d\n",
328 dev
->name
, (long long)atomic64_read(&q
->port_rate
),
332 static int cbs_dev_notifier(struct notifier_block
*nb
, unsigned long event
,
335 struct net_device
*dev
= netdev_notifier_info_to_dev(ptr
);
336 struct cbs_sched_data
*q
;
337 struct net_device
*qdev
;
342 if (event
!= NETDEV_UP
&& event
!= NETDEV_CHANGE
)
345 spin_lock(&cbs_list_lock
);
346 list_for_each_entry(q
, &cbs_list
, cbs_list
) {
347 qdev
= qdisc_dev(q
->qdisc
);
353 spin_unlock(&cbs_list_lock
);
356 cbs_set_port_rate(dev
, q
);
361 static int cbs_change(struct Qdisc
*sch
, struct nlattr
*opt
,
362 struct netlink_ext_ack
*extack
)
364 struct cbs_sched_data
*q
= qdisc_priv(sch
);
365 struct net_device
*dev
= qdisc_dev(sch
);
366 struct nlattr
*tb
[TCA_CBS_MAX
+ 1];
367 struct tc_cbs_qopt
*qopt
;
370 err
= nla_parse_nested_deprecated(tb
, TCA_CBS_MAX
, opt
, cbs_policy
,
375 if (!tb
[TCA_CBS_PARMS
]) {
376 NL_SET_ERR_MSG(extack
, "Missing CBS parameter which are mandatory");
380 qopt
= nla_data(tb
[TCA_CBS_PARMS
]);
382 if (!qopt
->offload
) {
383 cbs_set_port_rate(dev
, q
);
384 cbs_disable_offload(dev
, q
);
386 err
= cbs_enable_offload(dev
, q
, qopt
, extack
);
391 /* Everything went OK, save the parameters used. */
392 WRITE_ONCE(q
->hicredit
, qopt
->hicredit
);
393 WRITE_ONCE(q
->locredit
, qopt
->locredit
);
394 WRITE_ONCE(q
->idleslope
, qopt
->idleslope
* BYTES_PER_KBIT
);
395 WRITE_ONCE(q
->sendslope
, qopt
->sendslope
* BYTES_PER_KBIT
);
396 WRITE_ONCE(q
->offload
, qopt
->offload
);
401 static int cbs_init(struct Qdisc
*sch
, struct nlattr
*opt
,
402 struct netlink_ext_ack
*extack
)
404 struct cbs_sched_data
*q
= qdisc_priv(sch
);
405 struct net_device
*dev
= qdisc_dev(sch
);
408 NL_SET_ERR_MSG(extack
, "Missing CBS qdisc options which are mandatory");
412 q
->qdisc
= qdisc_create_dflt(sch
->dev_queue
, &pfifo_qdisc_ops
,
413 sch
->handle
, extack
);
417 spin_lock(&cbs_list_lock
);
418 list_add(&q
->cbs_list
, &cbs_list
);
419 spin_unlock(&cbs_list_lock
);
421 qdisc_hash_add(q
->qdisc
, false);
423 q
->queue
= sch
->dev_queue
- netdev_get_tx_queue(dev
, 0);
425 q
->enqueue
= cbs_enqueue_soft
;
426 q
->dequeue
= cbs_dequeue_soft
;
428 qdisc_watchdog_init(&q
->watchdog
, sch
);
430 return cbs_change(sch
, opt
, extack
);
433 static void cbs_destroy(struct Qdisc
*sch
)
435 struct cbs_sched_data
*q
= qdisc_priv(sch
);
436 struct net_device
*dev
= qdisc_dev(sch
);
438 /* Nothing to do if we couldn't create the underlying qdisc */
442 qdisc_watchdog_cancel(&q
->watchdog
);
443 cbs_disable_offload(dev
, q
);
445 spin_lock(&cbs_list_lock
);
446 list_del(&q
->cbs_list
);
447 spin_unlock(&cbs_list_lock
);
452 static int cbs_dump(struct Qdisc
*sch
, struct sk_buff
*skb
)
454 struct cbs_sched_data
*q
= qdisc_priv(sch
);
455 struct tc_cbs_qopt opt
= { };
458 nest
= nla_nest_start_noflag(skb
, TCA_OPTIONS
);
460 goto nla_put_failure
;
462 opt
.hicredit
= READ_ONCE(q
->hicredit
);
463 opt
.locredit
= READ_ONCE(q
->locredit
);
464 opt
.sendslope
= div64_s64(READ_ONCE(q
->sendslope
), BYTES_PER_KBIT
);
465 opt
.idleslope
= div64_s64(READ_ONCE(q
->idleslope
), BYTES_PER_KBIT
);
466 opt
.offload
= READ_ONCE(q
->offload
);
468 if (nla_put(skb
, TCA_CBS_PARMS
, sizeof(opt
), &opt
))
469 goto nla_put_failure
;
471 return nla_nest_end(skb
, nest
);
474 nla_nest_cancel(skb
, nest
);
478 static int cbs_dump_class(struct Qdisc
*sch
, unsigned long cl
,
479 struct sk_buff
*skb
, struct tcmsg
*tcm
)
481 struct cbs_sched_data
*q
= qdisc_priv(sch
);
483 if (cl
!= 1 || !q
->qdisc
) /* only one class */
486 tcm
->tcm_handle
|= TC_H_MIN(1);
487 tcm
->tcm_info
= q
->qdisc
->handle
;
492 static int cbs_graft(struct Qdisc
*sch
, unsigned long arg
, struct Qdisc
*new,
493 struct Qdisc
**old
, struct netlink_ext_ack
*extack
)
495 struct cbs_sched_data
*q
= qdisc_priv(sch
);
498 new = qdisc_create_dflt(sch
->dev_queue
, &pfifo_qdisc_ops
,
504 *old
= qdisc_replace(sch
, new, &q
->qdisc
);
508 static struct Qdisc
*cbs_leaf(struct Qdisc
*sch
, unsigned long arg
)
510 struct cbs_sched_data
*q
= qdisc_priv(sch
);
515 static unsigned long cbs_find(struct Qdisc
*sch
, u32 classid
)
/* Walk the single class via the common stats-dump helper. */
static void cbs_walk(struct Qdisc *sch, struct qdisc_walker *walker)
{
	tc_qdisc_stats_dump(sch, 1, walker);
}
527 static const struct Qdisc_class_ops cbs_class_ops
= {
532 .dump
= cbs_dump_class
,
535 static struct Qdisc_ops cbs_qdisc_ops __read_mostly
= {
537 .cl_ops
= &cbs_class_ops
,
538 .priv_size
= sizeof(struct cbs_sched_data
),
539 .enqueue
= cbs_enqueue
,
540 .dequeue
= cbs_dequeue
,
541 .peek
= qdisc_peek_dequeued
,
543 .reset
= qdisc_reset_queue
,
544 .destroy
= cbs_destroy
,
545 .change
= cbs_change
,
547 .owner
= THIS_MODULE
,
549 MODULE_ALIAS_NET_SCH("cbs");
551 static struct notifier_block cbs_device_notifier
= {
552 .notifier_call
= cbs_dev_notifier
,
555 static int __init
cbs_module_init(void)
559 err
= register_netdevice_notifier(&cbs_device_notifier
);
563 err
= register_qdisc(&cbs_qdisc_ops
);
565 unregister_netdevice_notifier(&cbs_device_notifier
);
570 static void __exit
cbs_module_exit(void)
572 unregister_qdisc(&cbs_qdisc_ops
);
573 unregister_netdevice_notifier(&cbs_device_notifier
);
575 module_init(cbs_module_init
)
576 module_exit(cbs_module_exit
)
577 MODULE_LICENSE("GPL");
578 MODULE_DESCRIPTION("Credit Based shaper");