1 // SPDX-License-Identifier: GPL-2.0-or-later
3 * net/sched/sch_cbs.c Credit Based Shaper
5 * Authors: Vinicius Costa Gomes <vinicius.gomes@intel.com>
8 /* Credit Based Shaper (CBS)
9 * =========================
11 * This is a simple rate-limiting shaper aimed at TSN applications on
12 * systems with known traffic workloads.
14 * Its algorithm is defined by the IEEE 802.1Q-2014 Specification,
15 * Section 8.6.8.2, and explained in more detail in the Annex L of the
18 * There are four tunables to be considered:
20 * 'idleslope': Idleslope is the rate of credits that is
21 * accumulated (in kilobits per second) when there is at least
22 * one packet waiting for transmission. Packets are transmitted
23 * when the current value of credits is equal or greater than
24 * zero. When there is no packet to be transmitted the amount of
25 * credits is set to zero. This is the main tunable of the CBS
29 * Sendslope is the rate of credits that is depleted (it should be a
30 * negative number of kilobits per second) when a transmission is
31 * occurring. It can be calculated as follows, (IEEE 802.1Q-2014 Section
34 * sendslope = idleslope - port_transmit_rate
36 * 'hicredit': Hicredit defines the maximum amount of credits (in
37 * bytes) that can be accumulated. Hicredit depends on the
38 * characteristics of interfering traffic,
39 * 'max_interference_size' is the maximum size of any burst of
40 * traffic that can delay the transmission of a frame that is
41 * available for transmission for this traffic class, (IEEE
42 * 802.1Q-2014 Annex L, Equation L-3):
44 * hicredit = max_interference_size * (idleslope / port_transmit_rate)
46 * 'locredit': Locredit is the minimum amount of credits that can
47 * be reached. It is a function of the traffic flowing through
48 * this qdisc (IEEE 802.1Q-2014 Annex L, Equation L-2):
50 * locredit = max_frame_size * (sendslope / port_transmit_rate)
53 #include <linux/module.h>
54 #include <linux/types.h>
55 #include <linux/kernel.h>
56 #include <linux/string.h>
57 #include <linux/errno.h>
58 #include <linux/skbuff.h>
59 #include <net/netevent.h>
60 #include <net/netlink.h>
61 #include <net/sch_generic.h>
62 #include <net/pkt_sched.h>
/* All CBS qdisc instances, so the netdev notifier can refresh their
 * cached link speed when a device comes up or changes.
 */
static LIST_HEAD(cbs_list);
/* Protects cbs_list. */
static DEFINE_SPINLOCK(cbs_list_lock);

#define BYTES_PER_KBIT (1000LL / 8)
69 struct cbs_sched_data
{
72 atomic64_t port_rate
; /* in bytes/s */
73 s64 last
; /* timestamp in ns */
74 s64 credits
; /* in bytes */
75 s32 locredit
; /* in bytes */
76 s32 hicredit
; /* in bytes */
77 s64 sendslope
; /* in bytes/s */
78 s64 idleslope
; /* in bytes/s */
79 struct qdisc_watchdog watchdog
;
80 int (*enqueue
)(struct sk_buff
*skb
, struct Qdisc
*sch
,
81 struct sk_buff
**to_free
);
82 struct sk_buff
*(*dequeue
)(struct Qdisc
*sch
);
84 struct list_head cbs_list
;
87 static int cbs_child_enqueue(struct sk_buff
*skb
, struct Qdisc
*sch
,
89 struct sk_buff
**to_free
)
91 unsigned int len
= qdisc_pkt_len(skb
);
94 err
= child
->ops
->enqueue(skb
, child
, to_free
);
95 if (err
!= NET_XMIT_SUCCESS
)
98 sch
->qstats
.backlog
+= len
;
101 return NET_XMIT_SUCCESS
;
104 static int cbs_enqueue_offload(struct sk_buff
*skb
, struct Qdisc
*sch
,
105 struct sk_buff
**to_free
)
107 struct cbs_sched_data
*q
= qdisc_priv(sch
);
108 struct Qdisc
*qdisc
= q
->qdisc
;
110 return cbs_child_enqueue(skb
, sch
, qdisc
, to_free
);
113 static int cbs_enqueue_soft(struct sk_buff
*skb
, struct Qdisc
*sch
,
114 struct sk_buff
**to_free
)
116 struct cbs_sched_data
*q
= qdisc_priv(sch
);
117 struct Qdisc
*qdisc
= q
->qdisc
;
119 if (sch
->q
.qlen
== 0 && q
->credits
> 0) {
120 /* We need to stop accumulating credits when there's
121 * no enqueued packets and q->credits is positive.
124 q
->last
= ktime_get_ns();
127 return cbs_child_enqueue(skb
, sch
, qdisc
, to_free
);
130 static int cbs_enqueue(struct sk_buff
*skb
, struct Qdisc
*sch
,
131 struct sk_buff
**to_free
)
133 struct cbs_sched_data
*q
= qdisc_priv(sch
);
135 return q
->enqueue(skb
, sch
, to_free
);
138 /* timediff is in ns, slope is in bytes/s */
139 static s64
timediff_to_credits(s64 timediff
, s64 slope
)
141 return div64_s64(timediff
* slope
, NSEC_PER_SEC
);
144 static s64
delay_from_credits(s64 credits
, s64 slope
)
146 if (unlikely(slope
== 0))
149 return div64_s64(-credits
* NSEC_PER_SEC
, slope
);
152 static s64
credits_from_len(unsigned int len
, s64 slope
, s64 port_rate
)
154 if (unlikely(port_rate
== 0))
157 return div64_s64(len
* slope
, port_rate
);
160 static struct sk_buff
*cbs_child_dequeue(struct Qdisc
*sch
, struct Qdisc
*child
)
164 skb
= child
->ops
->dequeue(child
);
168 qdisc_qstats_backlog_dec(sch
, skb
);
169 qdisc_bstats_update(sch
, skb
);
175 static struct sk_buff
*cbs_dequeue_soft(struct Qdisc
*sch
)
177 struct cbs_sched_data
*q
= qdisc_priv(sch
);
178 struct Qdisc
*qdisc
= q
->qdisc
;
179 s64 now
= ktime_get_ns();
184 if (q
->credits
< 0) {
185 credits
= timediff_to_credits(now
- q
->last
, q
->idleslope
);
187 credits
= q
->credits
+ credits
;
188 q
->credits
= min_t(s64
, credits
, q
->hicredit
);
190 if (q
->credits
< 0) {
193 delay
= delay_from_credits(q
->credits
, q
->idleslope
);
194 qdisc_watchdog_schedule_ns(&q
->watchdog
, now
+ delay
);
201 skb
= cbs_child_dequeue(sch
, qdisc
);
205 len
= qdisc_pkt_len(skb
);
207 /* As sendslope is a negative number, this will decrease the
208 * amount of q->credits.
210 credits
= credits_from_len(len
, q
->sendslope
,
211 atomic64_read(&q
->port_rate
));
212 credits
+= q
->credits
;
214 q
->credits
= max_t(s64
, credits
, q
->locredit
);
220 static struct sk_buff
*cbs_dequeue_offload(struct Qdisc
*sch
)
222 struct cbs_sched_data
*q
= qdisc_priv(sch
);
223 struct Qdisc
*qdisc
= q
->qdisc
;
225 return cbs_child_dequeue(sch
, qdisc
);
228 static struct sk_buff
*cbs_dequeue(struct Qdisc
*sch
)
230 struct cbs_sched_data
*q
= qdisc_priv(sch
);
232 return q
->dequeue(sch
);
235 static const struct nla_policy cbs_policy
[TCA_CBS_MAX
+ 1] = {
236 [TCA_CBS_PARMS
] = { .len
= sizeof(struct tc_cbs_qopt
) },
239 static void cbs_disable_offload(struct net_device
*dev
,
240 struct cbs_sched_data
*q
)
242 struct tc_cbs_qopt_offload cbs
= { };
243 const struct net_device_ops
*ops
;
249 q
->enqueue
= cbs_enqueue_soft
;
250 q
->dequeue
= cbs_dequeue_soft
;
252 ops
= dev
->netdev_ops
;
253 if (!ops
->ndo_setup_tc
)
256 cbs
.queue
= q
->queue
;
259 err
= ops
->ndo_setup_tc(dev
, TC_SETUP_QDISC_CBS
, &cbs
);
261 pr_warn("Couldn't disable CBS offload for queue %d\n",
265 static int cbs_enable_offload(struct net_device
*dev
, struct cbs_sched_data
*q
,
266 const struct tc_cbs_qopt
*opt
,
267 struct netlink_ext_ack
*extack
)
269 const struct net_device_ops
*ops
= dev
->netdev_ops
;
270 struct tc_cbs_qopt_offload cbs
= { };
273 if (!ops
->ndo_setup_tc
) {
274 NL_SET_ERR_MSG(extack
, "Specified device does not support cbs offload");
278 cbs
.queue
= q
->queue
;
281 cbs
.hicredit
= opt
->hicredit
;
282 cbs
.locredit
= opt
->locredit
;
283 cbs
.idleslope
= opt
->idleslope
;
284 cbs
.sendslope
= opt
->sendslope
;
286 err
= ops
->ndo_setup_tc(dev
, TC_SETUP_QDISC_CBS
, &cbs
);
288 NL_SET_ERR_MSG(extack
, "Specified device failed to setup cbs hardware offload");
292 q
->enqueue
= cbs_enqueue_offload
;
293 q
->dequeue
= cbs_dequeue_offload
;
298 static void cbs_set_port_rate(struct net_device
*dev
, struct cbs_sched_data
*q
)
300 struct ethtool_link_ksettings ecmd
;
301 int speed
= SPEED_10
;
305 err
= __ethtool_get_link_ksettings(dev
, &ecmd
);
309 if (ecmd
.base
.speed
&& ecmd
.base
.speed
!= SPEED_UNKNOWN
)
310 speed
= ecmd
.base
.speed
;
313 port_rate
= speed
* 1000 * BYTES_PER_KBIT
;
315 atomic64_set(&q
->port_rate
, port_rate
);
316 netdev_dbg(dev
, "cbs: set %s's port_rate to: %lld, linkspeed: %d\n",
317 dev
->name
, (long long)atomic64_read(&q
->port_rate
),
321 static int cbs_dev_notifier(struct notifier_block
*nb
, unsigned long event
,
324 struct net_device
*dev
= netdev_notifier_info_to_dev(ptr
);
325 struct cbs_sched_data
*q
;
326 struct net_device
*qdev
;
331 if (event
!= NETDEV_UP
&& event
!= NETDEV_CHANGE
)
334 spin_lock(&cbs_list_lock
);
335 list_for_each_entry(q
, &cbs_list
, cbs_list
) {
336 qdev
= qdisc_dev(q
->qdisc
);
342 spin_unlock(&cbs_list_lock
);
345 cbs_set_port_rate(dev
, q
);
350 static int cbs_change(struct Qdisc
*sch
, struct nlattr
*opt
,
351 struct netlink_ext_ack
*extack
)
353 struct cbs_sched_data
*q
= qdisc_priv(sch
);
354 struct net_device
*dev
= qdisc_dev(sch
);
355 struct nlattr
*tb
[TCA_CBS_MAX
+ 1];
356 struct tc_cbs_qopt
*qopt
;
359 err
= nla_parse_nested_deprecated(tb
, TCA_CBS_MAX
, opt
, cbs_policy
,
364 if (!tb
[TCA_CBS_PARMS
]) {
365 NL_SET_ERR_MSG(extack
, "Missing CBS parameter which are mandatory");
369 qopt
= nla_data(tb
[TCA_CBS_PARMS
]);
371 if (!qopt
->offload
) {
372 cbs_set_port_rate(dev
, q
);
373 cbs_disable_offload(dev
, q
);
375 err
= cbs_enable_offload(dev
, q
, qopt
, extack
);
380 /* Everything went OK, save the parameters used. */
381 q
->hicredit
= qopt
->hicredit
;
382 q
->locredit
= qopt
->locredit
;
383 q
->idleslope
= qopt
->idleslope
* BYTES_PER_KBIT
;
384 q
->sendslope
= qopt
->sendslope
* BYTES_PER_KBIT
;
385 q
->offload
= qopt
->offload
;
390 static int cbs_init(struct Qdisc
*sch
, struct nlattr
*opt
,
391 struct netlink_ext_ack
*extack
)
393 struct cbs_sched_data
*q
= qdisc_priv(sch
);
394 struct net_device
*dev
= qdisc_dev(sch
);
397 NL_SET_ERR_MSG(extack
, "Missing CBS qdisc options which are mandatory");
401 q
->qdisc
= qdisc_create_dflt(sch
->dev_queue
, &pfifo_qdisc_ops
,
402 sch
->handle
, extack
);
406 spin_lock(&cbs_list_lock
);
407 list_add(&q
->cbs_list
, &cbs_list
);
408 spin_unlock(&cbs_list_lock
);
410 qdisc_hash_add(q
->qdisc
, false);
412 q
->queue
= sch
->dev_queue
- netdev_get_tx_queue(dev
, 0);
414 q
->enqueue
= cbs_enqueue_soft
;
415 q
->dequeue
= cbs_dequeue_soft
;
417 qdisc_watchdog_init(&q
->watchdog
, sch
);
419 return cbs_change(sch
, opt
, extack
);
422 static void cbs_destroy(struct Qdisc
*sch
)
424 struct cbs_sched_data
*q
= qdisc_priv(sch
);
425 struct net_device
*dev
= qdisc_dev(sch
);
427 /* Nothing to do if we couldn't create the underlying qdisc */
431 qdisc_watchdog_cancel(&q
->watchdog
);
432 cbs_disable_offload(dev
, q
);
434 spin_lock(&cbs_list_lock
);
435 list_del(&q
->cbs_list
);
436 spin_unlock(&cbs_list_lock
);
441 static int cbs_dump(struct Qdisc
*sch
, struct sk_buff
*skb
)
443 struct cbs_sched_data
*q
= qdisc_priv(sch
);
444 struct tc_cbs_qopt opt
= { };
447 nest
= nla_nest_start_noflag(skb
, TCA_OPTIONS
);
449 goto nla_put_failure
;
451 opt
.hicredit
= q
->hicredit
;
452 opt
.locredit
= q
->locredit
;
453 opt
.sendslope
= div64_s64(q
->sendslope
, BYTES_PER_KBIT
);
454 opt
.idleslope
= div64_s64(q
->idleslope
, BYTES_PER_KBIT
);
455 opt
.offload
= q
->offload
;
457 if (nla_put(skb
, TCA_CBS_PARMS
, sizeof(opt
), &opt
))
458 goto nla_put_failure
;
460 return nla_nest_end(skb
, nest
);
463 nla_nest_cancel(skb
, nest
);
467 static int cbs_dump_class(struct Qdisc
*sch
, unsigned long cl
,
468 struct sk_buff
*skb
, struct tcmsg
*tcm
)
470 struct cbs_sched_data
*q
= qdisc_priv(sch
);
472 if (cl
!= 1 || !q
->qdisc
) /* only one class */
475 tcm
->tcm_handle
|= TC_H_MIN(1);
476 tcm
->tcm_info
= q
->qdisc
->handle
;
481 static int cbs_graft(struct Qdisc
*sch
, unsigned long arg
, struct Qdisc
*new,
482 struct Qdisc
**old
, struct netlink_ext_ack
*extack
)
484 struct cbs_sched_data
*q
= qdisc_priv(sch
);
487 new = qdisc_create_dflt(sch
->dev_queue
, &pfifo_qdisc_ops
,
493 *old
= qdisc_replace(sch
, new, &q
->qdisc
);
497 static struct Qdisc
*cbs_leaf(struct Qdisc
*sch
, unsigned long arg
)
499 struct cbs_sched_data
*q
= qdisc_priv(sch
);
504 static unsigned long cbs_find(struct Qdisc
*sch
, u32 classid
)
509 static void cbs_walk(struct Qdisc
*sch
, struct qdisc_walker
*walker
)
512 if (walker
->count
>= walker
->skip
) {
513 if (walker
->fn(sch
, 1, walker
) < 0) {
522 static const struct Qdisc_class_ops cbs_class_ops
= {
527 .dump
= cbs_dump_class
,
530 static struct Qdisc_ops cbs_qdisc_ops __read_mostly
= {
532 .cl_ops
= &cbs_class_ops
,
533 .priv_size
= sizeof(struct cbs_sched_data
),
534 .enqueue
= cbs_enqueue
,
535 .dequeue
= cbs_dequeue
,
536 .peek
= qdisc_peek_dequeued
,
538 .reset
= qdisc_reset_queue
,
539 .destroy
= cbs_destroy
,
540 .change
= cbs_change
,
542 .owner
= THIS_MODULE
,
545 static struct notifier_block cbs_device_notifier
= {
546 .notifier_call
= cbs_dev_notifier
,
549 static int __init
cbs_module_init(void)
553 err
= register_netdevice_notifier(&cbs_device_notifier
);
557 err
= register_qdisc(&cbs_qdisc_ops
);
559 unregister_netdevice_notifier(&cbs_device_notifier
);
564 static void __exit
cbs_module_exit(void)
566 unregister_qdisc(&cbs_qdisc_ops
);
567 unregister_netdevice_notifier(&cbs_device_notifier
);
/* Hook the module lifecycle into the init/exit functions above. */
module_init(cbs_module_init)
module_exit(cbs_module_exit)
MODULE_LICENSE("GPL");