// SPDX-License-Identifier: GPL-2.0

/* net/sched/sch_taprio.c	Time Aware Priority Scheduler
 *
 * Authors:	Vinicius Costa Gomes <vinicius.gomes@intel.com>
 *
 */
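
/* Example configuration (illustrative only; the interface name, base-time
 * and the per-entry intervals in nanoseconds below are placeholders):
 *
 *	tc qdisc replace dev eth0 parent root handle 100 taprio \
 *		num_tc 3 \
 *		map 2 2 1 0 2 2 2 2 2 2 2 2 2 2 2 2 \
 *		queues 1@0 1@1 2@2 \
 *		base-time 1528743495910289987 \
 *		sched-entry S 01 300000 \
 *		sched-entry S 02 300000 \
 *		sched-entry S 04 400000 \
 *		clockid CLOCK_TAI
 *
 * "map" maps priorities to traffic classes, "queues" maps traffic
 * classes to hardware queues as <count>@<offset>, and each
 * "sched-entry" is <command> <gate mask> <interval in ns>.
 */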

#include <linux/types.h>
#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/list.h>
#include <linux/errno.h>
#include <linux/skbuff.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <net/netlink.h>
#include <net/pkt_sched.h>
#include <net/pkt_cls.h>
#include <net/sch_generic.h>

#define TAPRIO_ALL_GATES_OPEN -1

struct sched_entry {
	struct list_head list;

	/* The instant that this entry "closes" and the next one
	 * should open, the qdisc will make some effort so that no
	 * packet leaves after this time.
	 */
	ktime_t close_time;
	atomic_t budget;
	int index;
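	/* Bit N of gate_mask opens the gate for traffic class N while
	 * this entry is active; e.g. (illustrative) a mask of 0x5 lets
	 * traffic classes 0 and 2 transmit.
	 */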
	u32 gate_mask;
	u32 interval;
	u8 command;
};

struct taprio_sched {
	struct Qdisc **qdiscs;
	struct Qdisc *root;
	s64 base_time;
	int clockid;
	int picos_per_byte; /* Using picoseconds because for 10Gbps+
			     * speeds it's sub-nanoseconds per byte
			     */
	size_t num_entries;

	/* Protects the update side of the RCU protected current_entry */
	spinlock_t current_entry_lock;
	struct sched_entry __rcu *current_entry;
	struct list_head entries;
	ktime_t (*get_time)(void);
	struct hrtimer advance_timer;
};

static int taprio_enqueue(struct sk_buff *skb, struct Qdisc *sch,
			  struct sk_buff **to_free)
{
	struct taprio_sched *q = qdisc_priv(sch);
	struct Qdisc *child;
	int queue;

	queue = skb_get_queue_mapping(skb);

	child = q->qdiscs[queue];
	if (unlikely(!child))
		return qdisc_drop(skb, sch, to_free);

	qdisc_qstats_backlog_inc(sch, skb);
	sch->q.qlen++;

	return qdisc_enqueue(skb, child, to_free);
}

static struct sk_buff *taprio_peek(struct Qdisc *sch)
{
	struct taprio_sched *q = qdisc_priv(sch);
	struct net_device *dev = qdisc_dev(sch);
	struct sched_entry *entry;
	struct sk_buff *skb;
	u32 gate_mask;
	int i;

	rcu_read_lock();
	entry = rcu_dereference(q->current_entry);
	gate_mask = entry ? entry->gate_mask : TAPRIO_ALL_GATES_OPEN;
	rcu_read_unlock();

	if (!gate_mask)
		return NULL;

	for (i = 0; i < dev->num_tx_queues; i++) {
		struct Qdisc *child = q->qdiscs[i];
		int prio;
		u8 tc;

		if (unlikely(!child))
			continue;

		skb = child->ops->peek(child);
		if (!skb)
			continue;

		prio = skb->priority;
		tc = netdev_get_prio_tc_map(dev, prio);

		if (!(gate_mask & BIT(tc)))
			return NULL;

		return skb;
	}

	return NULL;
}
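
/* Returns the transmission time, in nanoseconds, of a 'len' byte packet
 * at the current link speed. Illustrative: at 1Gbps (picos_per_byte ==
 * 8000), a 1500 byte frame takes (1500 * 8000) / 1000 = 12000 ns.
 */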
static inline int length_to_duration(struct taprio_sched *q, int len)
{
	return (len * q->picos_per_byte) / 1000;
}

static struct sk_buff *taprio_dequeue(struct Qdisc *sch)
{
	struct taprio_sched *q = qdisc_priv(sch);
	struct net_device *dev = qdisc_dev(sch);
	struct sched_entry *entry;
	struct sk_buff *skb;
	u32 gate_mask;
	int i;

	rcu_read_lock();
	entry = rcu_dereference(q->current_entry);
	/* if there's no entry, it means that the schedule didn't
	 * start yet, so force all gates to be open, this is in
	 * accordance with IEEE 802.1Qbv-2015 Section 8.6.9.4.5
	 * "AdminGateStates"
	 */
	gate_mask = entry ? entry->gate_mask : TAPRIO_ALL_GATES_OPEN;
	rcu_read_unlock();

	if (!gate_mask)
		return NULL;

	for (i = 0; i < dev->num_tx_queues; i++) {
		struct Qdisc *child = q->qdiscs[i];
		ktime_t guard;
		int prio;
		int len;
		u8 tc;

		if (unlikely(!child))
			continue;

		skb = child->ops->peek(child);
		if (!skb)
			continue;

		prio = skb->priority;
		tc = netdev_get_prio_tc_map(dev, prio);

		if (!(gate_mask & BIT(tc)))
			continue;

		len = qdisc_pkt_len(skb);
		guard = ktime_add_ns(q->get_time(),
				     length_to_duration(q, len));

		/* In the case that there's no gate entry, there's no
		 * guard band ...
		 */
		if (gate_mask != TAPRIO_ALL_GATES_OPEN &&
		    ktime_after(guard, entry->close_time))
			return NULL;

		/* ... and no budget. */
		if (gate_mask != TAPRIO_ALL_GATES_OPEN &&
		    atomic_sub_return(len, &entry->budget) < 0)
			return NULL;

		skb = child->ops->dequeue(child);
		if (unlikely(!skb))
			return NULL;

		qdisc_bstats_update(sch, skb);
		qdisc_qstats_backlog_dec(sch, skb);
		sch->q.qlen--;

		return skb;
	}

	return NULL;
}

static bool should_restart_cycle(const struct taprio_sched *q,
				 const struct sched_entry *entry)
{
	WARN_ON(!entry);

	return list_is_last(&entry->list, &q->entries);
}

static enum hrtimer_restart advance_sched(struct hrtimer *timer)
{
	struct taprio_sched *q = container_of(timer, struct taprio_sched,
					      advance_timer);
	struct sched_entry *entry, *next;
	struct Qdisc *sch = q->root;
	ktime_t close_time;

	spin_lock(&q->current_entry_lock);
	entry = rcu_dereference_protected(q->current_entry,
					  lockdep_is_held(&q->current_entry_lock));

	/* This is the case that it's the first time that the schedule
	 * runs, so it only happens once per schedule. The first entry
	 * is pre-calculated during the schedule initialization.
	 */
	if (unlikely(!entry)) {
		next = list_first_entry(&q->entries, struct sched_entry,
					list);
		close_time = next->close_time;
		goto first_run;
	}

	if (should_restart_cycle(q, entry))
		next = list_first_entry(&q->entries, struct sched_entry,
					list);
	else
		next = list_next_entry(entry, list);

	close_time = ktime_add_ns(entry->close_time, next->interval);

	next->close_time = close_time;
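	/* The budget is the number of bytes that fit in this entry's
	 * interval at the current link speed; e.g. (illustrative) a
	 * 300000 ns interval at 1Gbps (8000 ps/byte) allows
	 * (300000 * 1000) / 8000 = 37500 bytes.
	 */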
	atomic_set(&next->budget,
		   (next->interval * 1000) / q->picos_per_byte);

first_run:
	rcu_assign_pointer(q->current_entry, next);
	spin_unlock(&q->current_entry_lock);

	hrtimer_set_expires(&q->advance_timer, close_time);

	rcu_read_lock();
	__netif_schedule(sch);
	rcu_read_unlock();

	return HRTIMER_RESTART;
}

static const struct nla_policy entry_policy[TCA_TAPRIO_SCHED_ENTRY_MAX + 1] = {
	[TCA_TAPRIO_SCHED_ENTRY_INDEX]	   = { .type = NLA_U32 },
	[TCA_TAPRIO_SCHED_ENTRY_CMD]	   = { .type = NLA_U8 },
	[TCA_TAPRIO_SCHED_ENTRY_GATE_MASK] = { .type = NLA_U32 },
	[TCA_TAPRIO_SCHED_ENTRY_INTERVAL]  = { .type = NLA_U32 },
};

static const struct nla_policy entry_list_policy[TCA_TAPRIO_SCHED_MAX + 1] = {
	[TCA_TAPRIO_SCHED_ENTRY] = { .type = NLA_NESTED },
};

static const struct nla_policy taprio_policy[TCA_TAPRIO_ATTR_MAX + 1] = {
	[TCA_TAPRIO_ATTR_PRIOMAP]	     = {
		.len = sizeof(struct tc_mqprio_qopt)
	},
	[TCA_TAPRIO_ATTR_SCHED_ENTRY_LIST]   = { .type = NLA_NESTED },
	[TCA_TAPRIO_ATTR_SCHED_BASE_TIME]    = { .type = NLA_S64 },
	[TCA_TAPRIO_ATTR_SCHED_SINGLE_ENTRY] = { .type = NLA_NESTED },
	[TCA_TAPRIO_ATTR_SCHED_CLOCKID]      = { .type = NLA_S32 },
};

static int fill_sched_entry(struct nlattr **tb, struct sched_entry *entry,
			    struct netlink_ext_ack *extack)
{
	u32 interval = 0;

	if (tb[TCA_TAPRIO_SCHED_ENTRY_CMD])
		entry->command = nla_get_u8(
			tb[TCA_TAPRIO_SCHED_ENTRY_CMD]);

	if (tb[TCA_TAPRIO_SCHED_ENTRY_GATE_MASK])
		entry->gate_mask = nla_get_u32(
			tb[TCA_TAPRIO_SCHED_ENTRY_GATE_MASK]);

	if (tb[TCA_TAPRIO_SCHED_ENTRY_INTERVAL])
		interval = nla_get_u32(
			tb[TCA_TAPRIO_SCHED_ENTRY_INTERVAL]);

	if (interval == 0) {
		NL_SET_ERR_MSG(extack, "Invalid interval for schedule entry");
		return -EINVAL;
	}

	entry->interval = interval;

	return 0;
}

static int parse_sched_entry(struct nlattr *n, struct sched_entry *entry,
			     int index, struct netlink_ext_ack *extack)
{
	struct nlattr *tb[TCA_TAPRIO_SCHED_ENTRY_MAX + 1] = { };
	int err;

	err = nla_parse_nested(tb, TCA_TAPRIO_SCHED_ENTRY_MAX, n,
			       entry_policy, NULL);
	if (err < 0) {
		NL_SET_ERR_MSG(extack, "Could not parse nested entry");
		return -EINVAL;
	}

	entry->index = index;

	return fill_sched_entry(tb, entry, extack);
}

/* Returns the number of entries in case of success */
static int parse_sched_single_entry(struct nlattr *n,
				    struct taprio_sched *q,
				    struct netlink_ext_ack *extack)
{
	struct nlattr *tb_entry[TCA_TAPRIO_SCHED_ENTRY_MAX + 1] = { };
	struct nlattr *tb_list[TCA_TAPRIO_SCHED_MAX + 1] = { };
	struct sched_entry *entry;
	bool found = false;
	u32 index;
	int err;

	err = nla_parse_nested(tb_list, TCA_TAPRIO_SCHED_MAX,
			       n, entry_list_policy, NULL);
	if (err < 0) {
		NL_SET_ERR_MSG(extack, "Could not parse nested entry");
		return -EINVAL;
	}

	if (!tb_list[TCA_TAPRIO_SCHED_ENTRY]) {
		NL_SET_ERR_MSG(extack, "Single-entry must include an entry");
		return -EINVAL;
	}

	err = nla_parse_nested(tb_entry, TCA_TAPRIO_SCHED_ENTRY_MAX,
			       tb_list[TCA_TAPRIO_SCHED_ENTRY],
			       entry_policy, NULL);
	if (err < 0) {
		NL_SET_ERR_MSG(extack, "Could not parse nested entry");
		return -EINVAL;
	}

	if (!tb_entry[TCA_TAPRIO_SCHED_ENTRY_INDEX]) {
		NL_SET_ERR_MSG(extack, "Entry must specify an index");
		return -EINVAL;
	}

	index = nla_get_u32(tb_entry[TCA_TAPRIO_SCHED_ENTRY_INDEX]);
	if (index >= q->num_entries) {
		NL_SET_ERR_MSG(extack, "Index for single entry exceeds number of entries in schedule");
		return -EINVAL;
	}

	list_for_each_entry(entry, &q->entries, list) {
		if (entry->index == index) {
			found = true;
			break;
		}
	}

	if (!found) {
		NL_SET_ERR_MSG(extack, "Could not find entry");
		return -ENOENT;
	}

	err = fill_sched_entry(tb_entry, entry, extack);
	if (err < 0)
		return err;

	return q->num_entries;
}

static int parse_sched_list(struct nlattr *list,
			    struct taprio_sched *q,
			    struct netlink_ext_ack *extack)
{
	struct nlattr *n;
	int err, rem;
	int i = 0;

	if (!list)
		return -EINVAL;

	nla_for_each_nested(n, list, rem) {
		struct sched_entry *entry;

		if (nla_type(n) != TCA_TAPRIO_SCHED_ENTRY) {
			NL_SET_ERR_MSG(extack, "Attribute is not of type 'entry'");
			continue;
		}

		entry = kzalloc(sizeof(*entry), GFP_KERNEL);
		if (!entry) {
			NL_SET_ERR_MSG(extack, "Not enough memory for entry");
			return -ENOMEM;
		}

		err = parse_sched_entry(n, entry, i, extack);
		if (err < 0) {
			kfree(entry);
			return err;
		}

		list_add_tail(&entry->list, &q->entries);
		i++;
	}

	q->num_entries = i;

	return i;
}

/* Returns the number of entries in case of success */
static int parse_taprio_opt(struct nlattr **tb, struct taprio_sched *q,
			    struct netlink_ext_ack *extack)
{
	int err = 0;
	int clockid;

	if (tb[TCA_TAPRIO_ATTR_SCHED_ENTRY_LIST] &&
	    tb[TCA_TAPRIO_ATTR_SCHED_SINGLE_ENTRY])
		return -EINVAL;

	if (tb[TCA_TAPRIO_ATTR_SCHED_SINGLE_ENTRY] && q->num_entries == 0)
		return -EINVAL;

	if (q->clockid == -1 && !tb[TCA_TAPRIO_ATTR_SCHED_CLOCKID])
		return -EINVAL;

	if (tb[TCA_TAPRIO_ATTR_SCHED_BASE_TIME])
		q->base_time = nla_get_s64(
			tb[TCA_TAPRIO_ATTR_SCHED_BASE_TIME]);

	if (tb[TCA_TAPRIO_ATTR_SCHED_CLOCKID]) {
		clockid = nla_get_s32(tb[TCA_TAPRIO_ATTR_SCHED_CLOCKID]);

		/* We only support static clockids and we don't allow
		 * for it to be modified after the first init.
		 */
		if (clockid < 0 || (q->clockid != -1 && q->clockid != clockid))
			return -EINVAL;

		q->clockid = clockid;
	}

	if (tb[TCA_TAPRIO_ATTR_SCHED_ENTRY_LIST])
		err = parse_sched_list(
			tb[TCA_TAPRIO_ATTR_SCHED_ENTRY_LIST], q, extack);
	else if (tb[TCA_TAPRIO_ATTR_SCHED_SINGLE_ENTRY])
		err = parse_sched_single_entry(
			tb[TCA_TAPRIO_ATTR_SCHED_SINGLE_ENTRY], q, extack);

	/* parse_sched_* return the number of entries in the schedule;
	 * a schedule with zero entries is an error.
	 */
	if (err == 0) {
		NL_SET_ERR_MSG(extack, "The schedule should contain at least one entry");
		return -EINVAL;
	}

	return err;
}

static int taprio_parse_mqprio_opt(struct net_device *dev,
				   struct tc_mqprio_qopt *qopt,
				   struct netlink_ext_ack *extack)
{
	int i, j;

	if (!qopt) {
		NL_SET_ERR_MSG(extack, "'mqprio' configuration is necessary");
		return -EINVAL;
	}

	/* Verify num_tc is not out of max range */
	if (qopt->num_tc > TC_MAX_QUEUE) {
		NL_SET_ERR_MSG(extack, "Number of traffic classes is outside valid range");
		return -EINVAL;
	}

	/* taprio imposes that traffic classes map 1:n to tx queues */
	if (qopt->num_tc > dev->num_tx_queues) {
		NL_SET_ERR_MSG(extack, "Number of traffic classes is greater than number of HW queues");
		return -EINVAL;
	}

	/* Verify priority mapping uses valid tcs */
	for (i = 0; i < TC_BITMASK + 1; i++) {
		if (qopt->prio_tc_map[i] >= qopt->num_tc) {
			NL_SET_ERR_MSG(extack, "Invalid traffic class in priority to traffic class mapping");
			return -EINVAL;
		}
	}

	for (i = 0; i < qopt->num_tc; i++) {
		unsigned int last = qopt->offset[i] + qopt->count[i];

		/* Verify the queue count is within the tx range; a
		 * count equal to real_num_tx_queues indicates the
		 * last queue is in use.
		 */
		if (qopt->offset[i] >= dev->num_tx_queues ||
		    !qopt->count[i] ||
		    last > dev->real_num_tx_queues) {
			NL_SET_ERR_MSG(extack, "Invalid queue in traffic class to queue mapping");
			return -EINVAL;
		}

		/* Verify that the offset and counts do not overlap */
		for (j = i + 1; j < qopt->num_tc; j++) {
			if (last > qopt->offset[j]) {
				NL_SET_ERR_MSG(extack, "Detected overlap in the traffic class to queue mapping");
				return -EINVAL;
			}
		}
	}

	return 0;
}

static ktime_t taprio_get_start_time(struct Qdisc *sch)
{
	struct taprio_sched *q = qdisc_priv(sch);
	struct sched_entry *entry;
	ktime_t now, base, cycle;
	s64 n;

	base = ns_to_ktime(q->base_time);
	cycle = 0;

	/* Calculate the cycle time by summing all the intervals. */
	list_for_each_entry(entry, &q->entries, list)
		cycle = ktime_add_ns(cycle, entry->interval);

	if (!cycle)
		return base;

	now = q->get_time();

	if (ktime_after(base, now))
		return base;

	/* Schedule the start time for the beginning of the next
	 * cycle.
	 */
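	/* I.e. start = base + (floor((now - base) / cycle) + 1) * cycle,
	 * the first cycle boundary strictly after now. For example
	 * (illustrative numbers), base = 0, cycle = 1000000 ns and
	 * now = 2500000 ns gives n = 2 and a start of 3000000 ns.
	 */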
	n = div64_s64(ktime_sub_ns(now, base), cycle);

	return ktime_add_ns(base, (n + 1) * cycle);
}

static void taprio_start_sched(struct Qdisc *sch, ktime_t start)
{
	struct taprio_sched *q = qdisc_priv(sch);
	struct sched_entry *first;
	unsigned long flags;

	spin_lock_irqsave(&q->current_entry_lock, flags);

	first = list_first_entry(&q->entries, struct sched_entry,
				 list);

	first->close_time = ktime_add_ns(start, first->interval);
	atomic_set(&first->budget,
		   (first->interval * 1000) / q->picos_per_byte);
	rcu_assign_pointer(q->current_entry, NULL);

	spin_unlock_irqrestore(&q->current_entry_lock, flags);

	hrtimer_start(&q->advance_timer, start, HRTIMER_MODE_ABS);
}

static int taprio_change(struct Qdisc *sch, struct nlattr *opt,
			 struct netlink_ext_ack *extack)
{
	struct nlattr *tb[TCA_TAPRIO_ATTR_MAX + 1] = { };
	struct taprio_sched *q = qdisc_priv(sch);
	struct net_device *dev = qdisc_dev(sch);
	struct tc_mqprio_qopt *mqprio = NULL;
	struct ethtool_link_ksettings ecmd;
	int i, err, size;
	s64 link_speed;
	ktime_t start;

	err = nla_parse_nested(tb, TCA_TAPRIO_ATTR_MAX, opt,
			       taprio_policy, extack);
	if (err < 0)
		return err;

	err = -EINVAL;
	if (tb[TCA_TAPRIO_ATTR_PRIOMAP])
		mqprio = nla_data(tb[TCA_TAPRIO_ATTR_PRIOMAP]);

	err = taprio_parse_mqprio_opt(dev, mqprio, extack);
	if (err < 0)
		return err;

	/* A schedule with less than one entry is an error */
	size = parse_taprio_opt(tb, q, extack);
	if (size < 0)
		return size;

	hrtimer_init(&q->advance_timer, q->clockid, HRTIMER_MODE_ABS);
	q->advance_timer.function = advance_sched;

	switch (q->clockid) {
	case CLOCK_REALTIME:
		q->get_time = ktime_get_real;
		break;
	case CLOCK_MONOTONIC:
		q->get_time = ktime_get;
		break;
	case CLOCK_BOOTTIME:
		q->get_time = ktime_get_boottime;
		break;
	case CLOCK_TAI:
		q->get_time = ktime_get_clocktai;
		break;
	default:
		return -ENOTSUPP;
	}

	for (i = 0; i < dev->num_tx_queues; i++) {
		struct netdev_queue *dev_queue;
		struct Qdisc *qdisc;

		dev_queue = netdev_get_tx_queue(dev, i);
		qdisc = qdisc_create_dflt(dev_queue,
					  &pfifo_qdisc_ops,
					  TC_H_MAKE(TC_H_MAJ(sch->handle),
						    TC_H_MIN(i + 1)),
					  extack);
		if (!qdisc)
			return -ENOMEM;

		if (i < dev->real_num_tx_queues)
			qdisc_hash_add(qdisc, false);

		q->qdiscs[i] = qdisc;
	}

	if (mqprio) {
		netdev_set_num_tc(dev, mqprio->num_tc);
		for (i = 0; i < mqprio->num_tc; i++)
			netdev_set_tc_queue(dev, i,
					    mqprio->count[i],
					    mqprio->offset[i]);

		/* Always use supplied priority mappings */
		for (i = 0; i < TC_BITMASK + 1; i++)
			netdev_set_prio_tc_map(dev, i,
					       mqprio->prio_tc_map[i]);
	}

	if (!__ethtool_get_link_ksettings(dev, &ecmd))
		link_speed = ecmd.base.speed;
	else
		link_speed = SPEED_1000;
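
	/* Illustrative: ecmd.base.speed is in Mbit/s, so for a 1Gbps
	 * link (link_speed == 1000) this works out to
	 * (10^9 * 1000 * 8) / (1000 * 10^6) = 8000 picoseconds per
	 * byte, i.e. 8ns per byte.
	 */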
	q->picos_per_byte = div64_s64(NSEC_PER_SEC * 1000LL * 8,
				      link_speed * 1000 * 1000);

	start = taprio_get_start_time(sch);
	if (!start)
		return 0;

	taprio_start_sched(sch, start);

	return 0;
}

static void taprio_destroy(struct Qdisc *sch)
{
	struct taprio_sched *q = qdisc_priv(sch);
	struct net_device *dev = qdisc_dev(sch);
	struct sched_entry *entry, *n;
	unsigned int i;

	hrtimer_cancel(&q->advance_timer);

	if (q->qdiscs) {
		for (i = 0; i < dev->num_tx_queues && q->qdiscs[i]; i++)
			qdisc_put(q->qdiscs[i]);

		kfree(q->qdiscs);
	}
	q->qdiscs = NULL;

	netdev_set_num_tc(dev, 0);

	list_for_each_entry_safe(entry, n, &q->entries, list) {
		list_del(&entry->list);
		kfree(entry);
	}
}

static int taprio_init(struct Qdisc *sch, struct nlattr *opt,
		       struct netlink_ext_ack *extack)
{
	struct taprio_sched *q = qdisc_priv(sch);
	struct net_device *dev = qdisc_dev(sch);

	INIT_LIST_HEAD(&q->entries);
	spin_lock_init(&q->current_entry_lock);

	/* We may overwrite the configuration later */
	hrtimer_init(&q->advance_timer, CLOCK_TAI, HRTIMER_MODE_ABS);

	q->root = sch;

	/* We only support static clockids. Use an invalid value as default
	 * and get the valid one on taprio_change().
	 */
	q->clockid = -1;

	if (sch->parent != TC_H_ROOT)
		return -EOPNOTSUPP;

	if (!netif_is_multiqueue(dev))
		return -EOPNOTSUPP;

	/* pre-allocate qdisc, attachment can't fail */
	q->qdiscs = kcalloc(dev->num_tx_queues,
			    sizeof(q->qdiscs[0]),
			    GFP_KERNEL);

	if (!q->qdiscs)
		return -ENOMEM;

	if (!opt)
		return -EINVAL;

	return taprio_change(sch, opt, extack);
}

static struct netdev_queue *taprio_queue_get(struct Qdisc *sch,
					     unsigned long cl)
{
	struct net_device *dev = qdisc_dev(sch);
	unsigned long ntx = cl - 1;

	if (ntx >= dev->num_tx_queues)
		return NULL;

	return netdev_get_tx_queue(dev, ntx);
}

static int taprio_graft(struct Qdisc *sch, unsigned long cl,
			struct Qdisc *new, struct Qdisc **old,
			struct netlink_ext_ack *extack)
{
	struct taprio_sched *q = qdisc_priv(sch);
	struct net_device *dev = qdisc_dev(sch);
	struct netdev_queue *dev_queue = taprio_queue_get(sch, cl);

	if (!dev_queue)
		return -EINVAL;

	if (dev->flags & IFF_UP)
		dev_deactivate(dev);

	*old = q->qdiscs[cl - 1];
	q->qdiscs[cl - 1] = new;

	if (new)
		new->flags |= TCQ_F_ONETXQUEUE | TCQ_F_NOPARENT;

	if (dev->flags & IFF_UP)
		dev_activate(dev);

	return 0;
}

static int dump_entry(struct sk_buff *msg,
		      const struct sched_entry *entry)
{
	struct nlattr *item;

	item = nla_nest_start(msg, TCA_TAPRIO_SCHED_ENTRY);
	if (!item)
		return -ENOSPC;

	if (nla_put_u32(msg, TCA_TAPRIO_SCHED_ENTRY_INDEX, entry->index))
		goto nla_put_failure;

	if (nla_put_u8(msg, TCA_TAPRIO_SCHED_ENTRY_CMD, entry->command))
		goto nla_put_failure;

	if (nla_put_u32(msg, TCA_TAPRIO_SCHED_ENTRY_GATE_MASK,
			entry->gate_mask))
		goto nla_put_failure;

	if (nla_put_u32(msg, TCA_TAPRIO_SCHED_ENTRY_INTERVAL,
			entry->interval))
		goto nla_put_failure;

	return nla_nest_end(msg, item);

nla_put_failure:
	nla_nest_cancel(msg, item);
	return -1;
}

static int taprio_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	struct taprio_sched *q = qdisc_priv(sch);
	struct net_device *dev = qdisc_dev(sch);
	struct tc_mqprio_qopt opt = { 0 };
	struct nlattr *nest, *entry_list;
	struct sched_entry *entry;
	unsigned int i;

	opt.num_tc = netdev_get_num_tc(dev);
	memcpy(opt.prio_tc_map, dev->prio_tc_map, sizeof(opt.prio_tc_map));

	for (i = 0; i < netdev_get_num_tc(dev); i++) {
		opt.count[i] = dev->tc_to_txq[i].count;
		opt.offset[i] = dev->tc_to_txq[i].offset;
	}

	nest = nla_nest_start(skb, TCA_OPTIONS);
	if (!nest)
		return -ENOSPC;

	if (nla_put(skb, TCA_TAPRIO_ATTR_PRIOMAP, sizeof(opt), &opt))
		goto options_error;

	if (nla_put_s64(skb, TCA_TAPRIO_ATTR_SCHED_BASE_TIME,
			q->base_time, TCA_TAPRIO_PAD))
		goto options_error;

	if (nla_put_s32(skb, TCA_TAPRIO_ATTR_SCHED_CLOCKID, q->clockid))
		goto options_error;

	entry_list = nla_nest_start(skb, TCA_TAPRIO_ATTR_SCHED_ENTRY_LIST);
	if (!entry_list)
		goto options_error;

	list_for_each_entry(entry, &q->entries, list) {
		if (dump_entry(skb, entry) < 0)
			goto options_error;
	}

	nla_nest_end(skb, entry_list);

	return nla_nest_end(skb, nest);

options_error:
	nla_nest_cancel(skb, nest);
	return -1;
}

static struct Qdisc *taprio_leaf(struct Qdisc *sch, unsigned long cl)
{
	struct netdev_queue *dev_queue = taprio_queue_get(sch, cl);

	if (!dev_queue)
		return NULL;

	return dev_queue->qdisc_sleeping;
}

static unsigned long taprio_find(struct Qdisc *sch, u32 classid)
{
	unsigned int ntx = TC_H_MIN(classid);

	if (!taprio_queue_get(sch, ntx))
		return 0;
	return ntx;
}

static int taprio_dump_class(struct Qdisc *sch, unsigned long cl,
			     struct sk_buff *skb, struct tcmsg *tcm)
{
	struct netdev_queue *dev_queue = taprio_queue_get(sch, cl);

	tcm->tcm_parent = TC_H_ROOT;
	tcm->tcm_handle |= TC_H_MIN(cl);
	tcm->tcm_info = dev_queue->qdisc_sleeping->handle;

	return 0;
}

static int taprio_dump_class_stats(struct Qdisc *sch, unsigned long cl,
				   struct gnet_dump *d)
	__releases(d->lock)
	__acquires(d->lock)
{
	struct netdev_queue *dev_queue = taprio_queue_get(sch, cl);

	sch = dev_queue->qdisc_sleeping;
	if (gnet_stats_copy_basic(&sch->running, d, NULL, &sch->bstats) < 0 ||
	    gnet_stats_copy_queue(d, NULL, &sch->qstats, sch->q.qlen) < 0)
		return -1;
	return 0;
}

static void taprio_walk(struct Qdisc *sch, struct qdisc_walker *arg)
{
	struct net_device *dev = qdisc_dev(sch);
	unsigned long ntx;

	if (arg->stop)
		return;

	arg->count = arg->skip;
	for (ntx = arg->skip; ntx < dev->num_tx_queues; ntx++) {
		if (arg->fn(sch, ntx + 1, arg) < 0) {
			arg->stop = 1;
			break;
		}
		arg->count++;
	}
}

static struct netdev_queue *taprio_select_queue(struct Qdisc *sch,
						struct tcmsg *tcm)
{
	return taprio_queue_get(sch, TC_H_MIN(tcm->tcm_parent));
}

static const struct Qdisc_class_ops taprio_class_ops = {
	.graft		= taprio_graft,
	.leaf		= taprio_leaf,
	.find		= taprio_find,
	.walk		= taprio_walk,
	.dump		= taprio_dump_class,
	.dump_stats	= taprio_dump_class_stats,
	.select_queue	= taprio_select_queue,
};

static struct Qdisc_ops taprio_qdisc_ops __read_mostly = {
	.cl_ops		= &taprio_class_ops,
	.id		= "taprio",
	.priv_size	= sizeof(struct taprio_sched),
	.init		= taprio_init,
	.destroy	= taprio_destroy,
	.peek		= taprio_peek,
	.dequeue	= taprio_dequeue,
	.enqueue	= taprio_enqueue,
	.dump		= taprio_dump,
	.owner		= THIS_MODULE,
};

static int __init taprio_module_init(void)
{
	return register_qdisc(&taprio_qdisc_ops);
}

static void __exit taprio_module_exit(void)
{
	unregister_qdisc(&taprio_qdisc_ops);
}

module_init(taprio_module_init);
module_exit(taprio_module_exit);
MODULE_LICENSE("GPL");