// SPDX-License-Identifier: GPL-2.0-or-later
/* net/sched/sch_cbs.c	Credit Based Shaper
 *
 * Authors:	Vinicius Costa Gomes <vinicius.gomes@intel.com>
 */

/* Credit Based Shaper (CBS)
 * =========================
 *
 * This is a simple rate-limiting shaper aimed at TSN applications on
 * systems with known traffic workloads.
 *
 * Its algorithm is defined by the IEEE 802.1Q-2014 Specification,
 * Section 8.6.8.2, and explained in more detail in the Annex L of the
 * same specification.
 *
 * There are four tunables to be considered:
 *
 *	'idleslope': Idleslope is the rate of credits that is
 *	accumulated (in kilobits per second) when there is at least
 *	one packet waiting for transmission. Packets are transmitted
 *	when the current value of credits is equal or greater than
 *	zero. When there is no packet to be transmitted the amount of
 *	credits is set to zero. This is the main tunable of the CBS
 *	algorithm.
 *
 *	'sendslope':
 *	Sendslope is the rate of credits that is depleted (it should be a
 *	negative number of kilobits per second) when a transmission is
 *	occurring. It can be calculated as follows, (IEEE 802.1Q-2014 Section
 *	8.6.8.2 item g):
 *
 *	sendslope = idleslope - port_transmit_rate
 *
 *	'hicredit': Hicredit defines the maximum amount of credits (in
 *	bytes) that can be accumulated. Hicredit depends on the
 *	characteristics of interfering traffic,
 *	'max_interference_size' is the maximum size of any burst of
 *	traffic that can delay the transmission of a frame that is
 *	available for transmission for this traffic class, (IEEE
 *	802.1Q-2014 Annex L, Equation L-3):
 *
 *	hicredit = max_interference_size * (idleslope / port_transmit_rate)
 *
 *	'locredit': Locredit is the minimum amount of credits that can
 *	be reached. It is a function of the traffic flowing through
 *	this qdisc (IEEE 802.1Q-2014 Annex L, Equation L-2):
 *
 *	locredit = max_frame_size * (sendslope / port_transmit_rate)
 */
53 #include <linux/module.h>
54 #include <linux/types.h>
55 #include <linux/kernel.h>
56 #include <linux/string.h>
57 #include <linux/errno.h>
58 #include <linux/skbuff.h>
59 #include <net/netevent.h>
60 #include <net/netlink.h>
61 #include <net/sch_generic.h>
62 #include <net/pkt_sched.h>
/* Global list of all software-mode CBS instances; walked by the netdev
 * notifier to refresh the cached link speed when a device changes state.
 */
static LIST_HEAD(cbs_list);
static DEFINE_SPINLOCK(cbs_list_lock);

/* One kilobit per second expressed in bytes per second. */
#define BYTES_PER_KBIT (1000LL / 8)
69 struct cbs_sched_data
{
72 atomic64_t port_rate
; /* in bytes/s */
73 s64 last
; /* timestamp in ns */
74 s64 credits
; /* in bytes */
75 s32 locredit
; /* in bytes */
76 s32 hicredit
; /* in bytes */
77 s64 sendslope
; /* in bytes/s */
78 s64 idleslope
; /* in bytes/s */
79 struct qdisc_watchdog watchdog
;
80 int (*enqueue
)(struct sk_buff
*skb
, struct Qdisc
*sch
,
81 struct sk_buff
**to_free
);
82 struct sk_buff
*(*dequeue
)(struct Qdisc
*sch
);
84 struct list_head cbs_list
;
87 static int cbs_child_enqueue(struct sk_buff
*skb
, struct Qdisc
*sch
,
89 struct sk_buff
**to_free
)
91 unsigned int len
= qdisc_pkt_len(skb
);
94 err
= child
->ops
->enqueue(skb
, child
, to_free
);
95 if (err
!= NET_XMIT_SUCCESS
)
98 sch
->qstats
.backlog
+= len
;
101 return NET_XMIT_SUCCESS
;
104 static int cbs_enqueue_offload(struct sk_buff
*skb
, struct Qdisc
*sch
,
105 struct sk_buff
**to_free
)
107 struct cbs_sched_data
*q
= qdisc_priv(sch
);
108 struct Qdisc
*qdisc
= q
->qdisc
;
110 return cbs_child_enqueue(skb
, sch
, qdisc
, to_free
);
113 static int cbs_enqueue_soft(struct sk_buff
*skb
, struct Qdisc
*sch
,
114 struct sk_buff
**to_free
)
116 struct cbs_sched_data
*q
= qdisc_priv(sch
);
117 struct Qdisc
*qdisc
= q
->qdisc
;
119 if (sch
->q
.qlen
== 0 && q
->credits
> 0) {
120 /* We need to stop accumulating credits when there's
121 * no enqueued packets and q->credits is positive.
124 q
->last
= ktime_get_ns();
127 return cbs_child_enqueue(skb
, sch
, qdisc
, to_free
);
130 static int cbs_enqueue(struct sk_buff
*skb
, struct Qdisc
*sch
,
131 struct sk_buff
**to_free
)
133 struct cbs_sched_data
*q
= qdisc_priv(sch
);
135 return q
->enqueue(skb
, sch
, to_free
);
138 /* timediff is in ns, slope is in bytes/s */
139 static s64
timediff_to_credits(s64 timediff
, s64 slope
)
141 return div64_s64(timediff
* slope
, NSEC_PER_SEC
);
144 static s64
delay_from_credits(s64 credits
, s64 slope
)
146 if (unlikely(slope
== 0))
149 return div64_s64(-credits
* NSEC_PER_SEC
, slope
);
152 static s64
credits_from_len(unsigned int len
, s64 slope
, s64 port_rate
)
154 if (unlikely(port_rate
== 0))
157 return div64_s64(len
* slope
, port_rate
);
160 static struct sk_buff
*cbs_child_dequeue(struct Qdisc
*sch
, struct Qdisc
*child
)
164 skb
= child
->ops
->dequeue(child
);
168 qdisc_qstats_backlog_dec(sch
, skb
);
169 qdisc_bstats_update(sch
, skb
);
175 static struct sk_buff
*cbs_dequeue_soft(struct Qdisc
*sch
)
177 struct cbs_sched_data
*q
= qdisc_priv(sch
);
178 struct Qdisc
*qdisc
= q
->qdisc
;
179 s64 now
= ktime_get_ns();
184 /* The previous packet is still being sent */
186 qdisc_watchdog_schedule_ns(&q
->watchdog
, q
->last
);
189 if (q
->credits
< 0) {
190 credits
= timediff_to_credits(now
- q
->last
, q
->idleslope
);
192 credits
= q
->credits
+ credits
;
193 q
->credits
= min_t(s64
, credits
, q
->hicredit
);
195 if (q
->credits
< 0) {
198 delay
= delay_from_credits(q
->credits
, q
->idleslope
);
199 qdisc_watchdog_schedule_ns(&q
->watchdog
, now
+ delay
);
206 skb
= cbs_child_dequeue(sch
, qdisc
);
210 len
= qdisc_pkt_len(skb
);
212 /* As sendslope is a negative number, this will decrease the
213 * amount of q->credits.
215 credits
= credits_from_len(len
, q
->sendslope
,
216 atomic64_read(&q
->port_rate
));
217 credits
+= q
->credits
;
219 q
->credits
= max_t(s64
, credits
, q
->locredit
);
220 /* Estimate of the transmission of the last byte of the packet in ns */
221 if (unlikely(atomic64_read(&q
->port_rate
) == 0))
224 q
->last
= now
+ div64_s64(len
* NSEC_PER_SEC
,
225 atomic64_read(&q
->port_rate
));
230 static struct sk_buff
*cbs_dequeue_offload(struct Qdisc
*sch
)
232 struct cbs_sched_data
*q
= qdisc_priv(sch
);
233 struct Qdisc
*qdisc
= q
->qdisc
;
235 return cbs_child_dequeue(sch
, qdisc
);
238 static struct sk_buff
*cbs_dequeue(struct Qdisc
*sch
)
240 struct cbs_sched_data
*q
= qdisc_priv(sch
);
242 return q
->dequeue(sch
);
245 static const struct nla_policy cbs_policy
[TCA_CBS_MAX
+ 1] = {
246 [TCA_CBS_PARMS
] = { .len
= sizeof(struct tc_cbs_qopt
) },
249 static void cbs_disable_offload(struct net_device
*dev
,
250 struct cbs_sched_data
*q
)
252 struct tc_cbs_qopt_offload cbs
= { };
253 const struct net_device_ops
*ops
;
259 q
->enqueue
= cbs_enqueue_soft
;
260 q
->dequeue
= cbs_dequeue_soft
;
262 ops
= dev
->netdev_ops
;
263 if (!ops
->ndo_setup_tc
)
266 cbs
.queue
= q
->queue
;
269 err
= ops
->ndo_setup_tc(dev
, TC_SETUP_QDISC_CBS
, &cbs
);
271 pr_warn("Couldn't disable CBS offload for queue %d\n",
275 static int cbs_enable_offload(struct net_device
*dev
, struct cbs_sched_data
*q
,
276 const struct tc_cbs_qopt
*opt
,
277 struct netlink_ext_ack
*extack
)
279 const struct net_device_ops
*ops
= dev
->netdev_ops
;
280 struct tc_cbs_qopt_offload cbs
= { };
283 if (!ops
->ndo_setup_tc
) {
284 NL_SET_ERR_MSG(extack
, "Specified device does not support cbs offload");
288 cbs
.queue
= q
->queue
;
291 cbs
.hicredit
= opt
->hicredit
;
292 cbs
.locredit
= opt
->locredit
;
293 cbs
.idleslope
= opt
->idleslope
;
294 cbs
.sendslope
= opt
->sendslope
;
296 err
= ops
->ndo_setup_tc(dev
, TC_SETUP_QDISC_CBS
, &cbs
);
298 NL_SET_ERR_MSG(extack
, "Specified device failed to setup cbs hardware offload");
302 q
->enqueue
= cbs_enqueue_offload
;
303 q
->dequeue
= cbs_dequeue_offload
;
308 static void cbs_set_port_rate(struct net_device
*dev
, struct cbs_sched_data
*q
)
310 struct ethtool_link_ksettings ecmd
;
311 int speed
= SPEED_10
;
315 err
= __ethtool_get_link_ksettings(dev
, &ecmd
);
319 if (ecmd
.base
.speed
&& ecmd
.base
.speed
!= SPEED_UNKNOWN
)
320 speed
= ecmd
.base
.speed
;
323 port_rate
= speed
* 1000 * BYTES_PER_KBIT
;
325 atomic64_set(&q
->port_rate
, port_rate
);
326 netdev_dbg(dev
, "cbs: set %s's port_rate to: %lld, linkspeed: %d\n",
327 dev
->name
, (long long)atomic64_read(&q
->port_rate
),
331 static int cbs_dev_notifier(struct notifier_block
*nb
, unsigned long event
,
334 struct net_device
*dev
= netdev_notifier_info_to_dev(ptr
);
335 struct cbs_sched_data
*q
;
336 struct net_device
*qdev
;
341 if (event
!= NETDEV_UP
&& event
!= NETDEV_CHANGE
)
344 spin_lock(&cbs_list_lock
);
345 list_for_each_entry(q
, &cbs_list
, cbs_list
) {
346 qdev
= qdisc_dev(q
->qdisc
);
352 spin_unlock(&cbs_list_lock
);
355 cbs_set_port_rate(dev
, q
);
360 static int cbs_change(struct Qdisc
*sch
, struct nlattr
*opt
,
361 struct netlink_ext_ack
*extack
)
363 struct cbs_sched_data
*q
= qdisc_priv(sch
);
364 struct net_device
*dev
= qdisc_dev(sch
);
365 struct nlattr
*tb
[TCA_CBS_MAX
+ 1];
366 struct tc_cbs_qopt
*qopt
;
369 err
= nla_parse_nested_deprecated(tb
, TCA_CBS_MAX
, opt
, cbs_policy
,
374 if (!tb
[TCA_CBS_PARMS
]) {
375 NL_SET_ERR_MSG(extack
, "Missing CBS parameter which are mandatory");
379 qopt
= nla_data(tb
[TCA_CBS_PARMS
]);
381 if (!qopt
->offload
) {
382 cbs_set_port_rate(dev
, q
);
383 cbs_disable_offload(dev
, q
);
385 err
= cbs_enable_offload(dev
, q
, qopt
, extack
);
390 /* Everything went OK, save the parameters used. */
391 q
->hicredit
= qopt
->hicredit
;
392 q
->locredit
= qopt
->locredit
;
393 q
->idleslope
= qopt
->idleslope
* BYTES_PER_KBIT
;
394 q
->sendslope
= qopt
->sendslope
* BYTES_PER_KBIT
;
395 q
->offload
= qopt
->offload
;
400 static int cbs_init(struct Qdisc
*sch
, struct nlattr
*opt
,
401 struct netlink_ext_ack
*extack
)
403 struct cbs_sched_data
*q
= qdisc_priv(sch
);
404 struct net_device
*dev
= qdisc_dev(sch
);
407 NL_SET_ERR_MSG(extack
, "Missing CBS qdisc options which are mandatory");
411 q
->qdisc
= qdisc_create_dflt(sch
->dev_queue
, &pfifo_qdisc_ops
,
412 sch
->handle
, extack
);
416 spin_lock(&cbs_list_lock
);
417 list_add(&q
->cbs_list
, &cbs_list
);
418 spin_unlock(&cbs_list_lock
);
420 qdisc_hash_add(q
->qdisc
, false);
422 q
->queue
= sch
->dev_queue
- netdev_get_tx_queue(dev
, 0);
424 q
->enqueue
= cbs_enqueue_soft
;
425 q
->dequeue
= cbs_dequeue_soft
;
427 qdisc_watchdog_init(&q
->watchdog
, sch
);
429 return cbs_change(sch
, opt
, extack
);
432 static void cbs_destroy(struct Qdisc
*sch
)
434 struct cbs_sched_data
*q
= qdisc_priv(sch
);
435 struct net_device
*dev
= qdisc_dev(sch
);
437 /* Nothing to do if we couldn't create the underlying qdisc */
441 qdisc_watchdog_cancel(&q
->watchdog
);
442 cbs_disable_offload(dev
, q
);
444 spin_lock(&cbs_list_lock
);
445 list_del(&q
->cbs_list
);
446 spin_unlock(&cbs_list_lock
);
451 static int cbs_dump(struct Qdisc
*sch
, struct sk_buff
*skb
)
453 struct cbs_sched_data
*q
= qdisc_priv(sch
);
454 struct tc_cbs_qopt opt
= { };
457 nest
= nla_nest_start_noflag(skb
, TCA_OPTIONS
);
459 goto nla_put_failure
;
461 opt
.hicredit
= q
->hicredit
;
462 opt
.locredit
= q
->locredit
;
463 opt
.sendslope
= div64_s64(q
->sendslope
, BYTES_PER_KBIT
);
464 opt
.idleslope
= div64_s64(q
->idleslope
, BYTES_PER_KBIT
);
465 opt
.offload
= q
->offload
;
467 if (nla_put(skb
, TCA_CBS_PARMS
, sizeof(opt
), &opt
))
468 goto nla_put_failure
;
470 return nla_nest_end(skb
, nest
);
473 nla_nest_cancel(skb
, nest
);
477 static int cbs_dump_class(struct Qdisc
*sch
, unsigned long cl
,
478 struct sk_buff
*skb
, struct tcmsg
*tcm
)
480 struct cbs_sched_data
*q
= qdisc_priv(sch
);
482 if (cl
!= 1 || !q
->qdisc
) /* only one class */
485 tcm
->tcm_handle
|= TC_H_MIN(1);
486 tcm
->tcm_info
= q
->qdisc
->handle
;
491 static int cbs_graft(struct Qdisc
*sch
, unsigned long arg
, struct Qdisc
*new,
492 struct Qdisc
**old
, struct netlink_ext_ack
*extack
)
494 struct cbs_sched_data
*q
= qdisc_priv(sch
);
497 new = qdisc_create_dflt(sch
->dev_queue
, &pfifo_qdisc_ops
,
503 *old
= qdisc_replace(sch
, new, &q
->qdisc
);
507 static struct Qdisc
*cbs_leaf(struct Qdisc
*sch
, unsigned long arg
)
509 struct cbs_sched_data
*q
= qdisc_priv(sch
);
514 static unsigned long cbs_find(struct Qdisc
*sch
, u32 classid
)
519 static void cbs_walk(struct Qdisc
*sch
, struct qdisc_walker
*walker
)
522 if (walker
->count
>= walker
->skip
) {
523 if (walker
->fn(sch
, 1, walker
) < 0) {
532 static const struct Qdisc_class_ops cbs_class_ops
= {
537 .dump
= cbs_dump_class
,
540 static struct Qdisc_ops cbs_qdisc_ops __read_mostly
= {
542 .cl_ops
= &cbs_class_ops
,
543 .priv_size
= sizeof(struct cbs_sched_data
),
544 .enqueue
= cbs_enqueue
,
545 .dequeue
= cbs_dequeue
,
546 .peek
= qdisc_peek_dequeued
,
548 .reset
= qdisc_reset_queue
,
549 .destroy
= cbs_destroy
,
550 .change
= cbs_change
,
552 .owner
= THIS_MODULE
,
555 static struct notifier_block cbs_device_notifier
= {
556 .notifier_call
= cbs_dev_notifier
,
559 static int __init
cbs_module_init(void)
563 err
= register_netdevice_notifier(&cbs_device_notifier
);
567 err
= register_qdisc(&cbs_qdisc_ops
);
569 unregister_netdevice_notifier(&cbs_device_notifier
);
574 static void __exit
cbs_module_exit(void)
576 unregister_qdisc(&cbs_qdisc_ops
);
577 unregister_netdevice_notifier(&cbs_device_notifier
);
579 module_init(cbs_module_init
)
580 module_exit(cbs_module_exit
)
581 MODULE_LICENSE("GPL");