// SPDX-License-Identifier: GPL-2.0

/* net/sched/sch_taprio.c	Time Aware Priority Scheduler
 *
 * Authors:	Vinicius Costa Gomes <vinicius.gomes@intel.com>
 */

#include <linux/ethtool.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/list.h>
#include <linux/errno.h>
#include <linux/skbuff.h>
#include <linux/math64.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/rcupdate.h>
#include <net/netlink.h>
#include <net/pkt_sched.h>
#include <net/pkt_cls.h>
#include <net/sch_generic.h>
static LIST_HEAD(taprio_list);
static DEFINE_SPINLOCK(taprio_list_lock);

#define TAPRIO_ALL_GATES_OPEN -1

#define TXTIME_ASSIST_IS_ENABLED(flags) ((flags) & TCA_TAPRIO_ATTR_FLAG_TXTIME_ASSIST)
#define FULL_OFFLOAD_IS_ENABLED(flags) ((flags) & TCA_TAPRIO_ATTR_FLAG_FULL_OFFLOAD)
#define TAPRIO_FLAGS_INVALID U32_MAX
struct sched_entry {
	struct list_head list;

	/* The instant that this entry "closes" and the next one
	 * should open, the qdisc will make some effort so that no
	 * packet leaves after this time.
	 */
	ktime_t close_time;
	ktime_t next_txtime;
	atomic_t budget;
	int index;
	u32 gate_mask;
	u32 interval;
	u8 command;
};
struct sched_gate_list {
	struct rcu_head rcu;
	struct list_head entries;
	size_t num_entries;
	ktime_t cycle_close_time;
	s64 cycle_time;
	s64 cycle_time_extension;
	s64 base_time;
};
struct taprio_sched {
	struct Qdisc **qdiscs;
	struct Qdisc *root;
	u32 flags;
	enum tk_offsets tk_offset;
	int clockid;
	atomic64_t picos_per_byte; /* Using picoseconds because for 10Gbps+
				    * speeds it's sub-nanoseconds per byte
				    */

	/* Protects the update side of the RCU protected current_entry */
	spinlock_t current_entry_lock;
	struct sched_entry __rcu *current_entry;
	struct sched_gate_list __rcu *oper_sched;
	struct sched_gate_list __rcu *admin_sched;
	struct hrtimer advance_timer;
	struct list_head taprio_list;
	struct sk_buff *(*dequeue)(struct Qdisc *sch);
	struct sk_buff *(*peek)(struct Qdisc *sch);
	u32 txtime_delay;
};
struct __tc_taprio_qopt_offload {
	refcount_t users;
	struct tc_taprio_qopt_offload offload;
};
static ktime_t sched_base_time(const struct sched_gate_list *sched)
{
	return ns_to_ktime(sched->base_time);
}
static ktime_t taprio_get_time(struct taprio_sched *q)
{
	ktime_t mono = ktime_get();

	switch (q->tk_offset) {
	case TK_OFFS_MAX:
		return mono;
	default:
		return ktime_mono_to_any(mono, q->tk_offset);
	}
}
static void taprio_free_sched_cb(struct rcu_head *head)
{
	struct sched_gate_list *sched = container_of(head, struct sched_gate_list, rcu);
	struct sched_entry *entry, *n;

	list_for_each_entry_safe(entry, n, &sched->entries, list) {
		list_del(&entry->list);
		kfree(entry);
	}

	kfree(sched);
}
static void switch_schedules(struct taprio_sched *q,
			     struct sched_gate_list **admin,
			     struct sched_gate_list **oper)
{
	rcu_assign_pointer(q->oper_sched, *admin);
	rcu_assign_pointer(q->admin_sched, NULL);

	if (*oper)
		call_rcu(&(*oper)->rcu, taprio_free_sched_cb);

	*oper = *admin;
	*admin = NULL;
}
/* Get how much time has been already elapsed in the current cycle. */
static s32 get_cycle_time_elapsed(struct sched_gate_list *sched, ktime_t time)
{
	ktime_t time_since_sched_start;
	s32 time_elapsed;

	time_since_sched_start = ktime_sub(time, sched->base_time);
	div_s64_rem(time_since_sched_start, sched->cycle_time, &time_elapsed);

	return time_elapsed;
}
static ktime_t get_interval_end_time(struct sched_gate_list *sched,
				     struct sched_gate_list *admin,
				     struct sched_entry *entry,
				     ktime_t intv_start)
{
	s32 cycle_elapsed = get_cycle_time_elapsed(sched, intv_start);
	ktime_t intv_end, cycle_ext_end, cycle_end;

	cycle_end = ktime_add_ns(intv_start, sched->cycle_time - cycle_elapsed);
	intv_end = ktime_add_ns(intv_start, entry->interval);
	cycle_ext_end = ktime_add(cycle_end, sched->cycle_time_extension);

	if (ktime_before(intv_end, cycle_end))
		return intv_end;
	else if (admin && admin != sched &&
		 ktime_after(admin->base_time, cycle_end) &&
		 ktime_before(admin->base_time, cycle_ext_end))
		return admin->base_time;

	return cycle_end;
}
static int length_to_duration(struct taprio_sched *q, int len)
{
	return div_u64(len * atomic64_read(&q->picos_per_byte), 1000);
}
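
/* Worked example (illustrative only, not part of the original source): at
 * 1 Gbps the scheduler stores picos_per_byte = 8000 (8 bits * 1000 ps/bit),
 * so a 1500 byte frame maps to
 *
 *	length_to_duration(q, 1500) = 1500 * 8000 / 1000 = 12000 ns (12 us)
 *
 * which is the transmission time the guard-band and budget checks below
 * reason about.
 */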
/* Returns the entry corresponding to next available interval. If
 * validate_interval is set, it only validates whether the timestamp occurs
 * when the gate corresponding to the skb's traffic class is open.
 */
static struct sched_entry *find_entry_to_transmit(struct sk_buff *skb,
						  struct Qdisc *sch,
						  struct sched_gate_list *sched,
						  struct sched_gate_list *admin,
						  ktime_t time,
						  ktime_t *interval_start,
						  ktime_t *interval_end,
						  bool validate_interval)
{
	ktime_t curr_intv_start, curr_intv_end, cycle_end, packet_transmit_time;
	ktime_t earliest_txtime = KTIME_MAX, txtime, cycle, transmit_end_time;
	struct sched_entry *entry = NULL, *entry_found = NULL;
	struct taprio_sched *q = qdisc_priv(sch);
	struct net_device *dev = qdisc_dev(sch);
	bool entry_available = false;
	s32 cycle_elapsed;
	int tc, n;

	tc = netdev_get_prio_tc_map(dev, skb->priority);
	packet_transmit_time = length_to_duration(q, qdisc_pkt_len(skb));

	cycle = sched->cycle_time;
	cycle_elapsed = get_cycle_time_elapsed(sched, time);
	curr_intv_end = ktime_sub_ns(time, cycle_elapsed);
	cycle_end = ktime_add_ns(curr_intv_end, cycle);

	list_for_each_entry(entry, &sched->entries, list) {
		curr_intv_start = curr_intv_end;
		curr_intv_end = get_interval_end_time(sched, admin, entry,
						      curr_intv_start);

		if (ktime_after(curr_intv_start, cycle_end))
			break;

		if (!(entry->gate_mask & BIT(tc)) ||
		    packet_transmit_time > entry->interval)
			continue;

		txtime = entry->next_txtime;

		if (ktime_before(txtime, time) || validate_interval) {
			transmit_end_time = ktime_add_ns(time, packet_transmit_time);
			if ((ktime_before(curr_intv_start, time) &&
			     ktime_before(transmit_end_time, curr_intv_end)) ||
			    (ktime_after(curr_intv_start, time) && !validate_interval)) {
				entry_found = entry;
				*interval_start = curr_intv_start;
				*interval_end = curr_intv_end;
				break;
			} else if (!entry_available && !validate_interval) {
				/* Here, we are just trying to find out the
				 * first available interval in the next cycle.
				 */
				entry_available = true;
				entry_found = entry;
				*interval_start = ktime_add_ns(curr_intv_start, cycle);
				*interval_end = ktime_add_ns(curr_intv_end, cycle);
			}
		} else if (ktime_before(txtime, earliest_txtime) &&
			   !entry_available) {
			earliest_txtime = txtime;
			entry_found = entry;
			n = div_s64(ktime_sub(txtime, curr_intv_start), cycle);
			*interval_start = ktime_add(curr_intv_start, n * cycle);
			*interval_end = ktime_add(curr_intv_end, n * cycle);
		}
	}

	return entry_found;
}
static bool is_valid_interval(struct sk_buff *skb, struct Qdisc *sch)
{
	struct taprio_sched *q = qdisc_priv(sch);
	struct sched_gate_list *sched, *admin;
	ktime_t interval_start, interval_end;
	struct sched_entry *entry;

	rcu_read_lock();
	sched = rcu_dereference(q->oper_sched);
	admin = rcu_dereference(q->admin_sched);

	entry = find_entry_to_transmit(skb, sch, sched, admin, skb->tstamp,
				       &interval_start, &interval_end, true);
	rcu_read_unlock();

	return entry;
}
static bool taprio_flags_valid(u32 flags)
{
	/* Make sure no other flag bits are set. */
	if (flags & ~(TCA_TAPRIO_ATTR_FLAG_TXTIME_ASSIST |
		      TCA_TAPRIO_ATTR_FLAG_FULL_OFFLOAD))
		return false;
	/* txtime-assist and full offload are mutually exclusive */
	if ((flags & TCA_TAPRIO_ATTR_FLAG_TXTIME_ASSIST) &&
	    (flags & TCA_TAPRIO_ATTR_FLAG_FULL_OFFLOAD))
		return false;
	return true;
}
/* This returns the tstamp value set by TCP in terms of the set clock. */
static ktime_t get_tcp_tstamp(struct taprio_sched *q, struct sk_buff *skb)
{
	unsigned int offset = skb_network_offset(skb);
	const struct ipv6hdr *ipv6h;
	const struct iphdr *iph;
	struct ipv6hdr _ipv6h;

	ipv6h = skb_header_pointer(skb, offset, sizeof(_ipv6h), &_ipv6h);
	if (!ipv6h)
		return 0;

	if (ipv6h->version == 4) {
		iph = (struct iphdr *)ipv6h;
		offset += iph->ihl * 4;

		/* special-case 6in4 tunnelling, as that is a common way to get
		 * v6 connectivity in the home
		 */
		if (iph->protocol == IPPROTO_IPV6) {
			ipv6h = skb_header_pointer(skb, offset,
						   sizeof(_ipv6h), &_ipv6h);

			if (!ipv6h || ipv6h->nexthdr != IPPROTO_TCP)
				return 0;
		} else if (iph->protocol != IPPROTO_TCP) {
			return 0;
		}
	} else if (ipv6h->version == 6 && ipv6h->nexthdr != IPPROTO_TCP) {
		return 0;
	}

	return ktime_mono_to_any(skb->skb_mstamp_ns, q->tk_offset);
}
/* There are a few scenarios where we will have to modify the txtime from
 * what is read from next_txtime in sched_entry. They are:
 * 1. If txtime is in the past,
 *    a. The gate for the traffic class is currently open and packet can be
 *       transmitted before it closes, schedule the packet right away.
 *    b. If the gate corresponding to the traffic class is going to open later
 *       in the cycle, set the txtime of packet to the interval start.
 * 2. If txtime is in the future, there are packets corresponding to the
 *    current traffic class waiting to be transmitted. So, the following
 *    possibilities exist:
 *    a. We can transmit the packet before the window containing the txtime
 *       closes.
 *    b. The window might close before the transmission can be completed
 *       successfully. So, schedule the packet in the next open window.
 */
static long get_packet_txtime(struct sk_buff *skb, struct Qdisc *sch)
{
	ktime_t transmit_end_time, interval_end, interval_start, tcp_tstamp;
	struct taprio_sched *q = qdisc_priv(sch);
	struct sched_gate_list *sched, *admin;
	ktime_t minimum_time, now, txtime;
	int len, packet_transmit_time;
	struct sched_entry *entry;
	bool sched_changed;

	rcu_read_lock();
	now = taprio_get_time(q);
	minimum_time = ktime_add_ns(now, q->txtime_delay);

	tcp_tstamp = get_tcp_tstamp(q, skb);
	minimum_time = max_t(ktime_t, minimum_time, tcp_tstamp);

	admin = rcu_dereference(q->admin_sched);
	sched = rcu_dereference(q->oper_sched);
	if (admin && ktime_after(minimum_time, admin->base_time))
		switch_schedules(q, &admin, &sched);

	/* Until the schedule starts, all the queues are open */
	if (!sched || ktime_before(minimum_time, sched->base_time)) {
		txtime = minimum_time;
		goto done;
	}

	len = qdisc_pkt_len(skb);
	packet_transmit_time = length_to_duration(q, len);

	do {
		sched_changed = false;

		entry = find_entry_to_transmit(skb, sch, sched, admin,
					       minimum_time,
					       &interval_start, &interval_end,
					       false);
		if (!entry) {
			txtime = 0;
			goto done;
		}

		txtime = entry->next_txtime;
		txtime = max_t(ktime_t, txtime, minimum_time);
		txtime = max_t(ktime_t, txtime, interval_start);

		if (admin && admin != sched &&
		    ktime_after(txtime, admin->base_time)) {
			sched = admin;
			sched_changed = true;
			continue;
		}

		transmit_end_time = ktime_add(txtime, packet_transmit_time);
		minimum_time = transmit_end_time;

		/* Update the txtime of current entry to the next time it's
		 * interval starts.
		 */
		if (ktime_after(transmit_end_time, interval_end))
			entry->next_txtime = ktime_add(interval_start, sched->cycle_time);
	} while (sched_changed || ktime_after(transmit_end_time, interval_end));

	entry->next_txtime = transmit_end_time;

done:
	rcu_read_unlock();
	return txtime;
}
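
/* Worked example (illustrative only, not from the original source): with
 * txtime-assist enabled and txtime_delay = 500 us, suppose the gate for the
 * skb's traffic class is open from T to T + 300 us and entry->next_txtime is
 * in the past. Then minimum_time = now + 500 us and the loop above picks
 * txtime = max(next_txtime, minimum_time, interval_start), i.e. the packet is
 * launched at the first instant inside an open window that is at least
 * txtime_delay in the future (scenario 1 in the comment block above).
 */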
static int taprio_enqueue(struct sk_buff *skb, struct Qdisc *sch,
			  struct sk_buff **to_free)
{
	struct taprio_sched *q = qdisc_priv(sch);
	struct Qdisc *child;
	int queue;

	queue = skb_get_queue_mapping(skb);

	child = q->qdiscs[queue];
	if (unlikely(!child))
		return qdisc_drop(skb, sch, to_free);

	if (skb->sk && sock_flag(skb->sk, SOCK_TXTIME)) {
		if (!is_valid_interval(skb, sch))
			return qdisc_drop(skb, sch, to_free);
	} else if (TXTIME_ASSIST_IS_ENABLED(q->flags)) {
		skb->tstamp = get_packet_txtime(skb, sch);
		if (!skb->tstamp)
			return qdisc_drop(skb, sch, to_free);
	}

	qdisc_qstats_backlog_inc(sch, skb);
	sch->q.qlen++;

	return qdisc_enqueue(skb, child, to_free);
}
static struct sk_buff *taprio_peek_soft(struct Qdisc *sch)
{
	struct taprio_sched *q = qdisc_priv(sch);
	struct net_device *dev = qdisc_dev(sch);
	struct sched_entry *entry;
	struct sk_buff *skb;
	u32 gate_mask;
	int i;

	rcu_read_lock();
	entry = rcu_dereference(q->current_entry);
	gate_mask = entry ? entry->gate_mask : TAPRIO_ALL_GATES_OPEN;
	rcu_read_unlock();

	if (!gate_mask)
		return NULL;

	for (i = 0; i < dev->num_tx_queues; i++) {
		struct Qdisc *child = q->qdiscs[i];
		int prio;
		u8 tc;

		if (unlikely(!child))
			continue;

		skb = child->ops->peek(child);
		if (!skb)
			continue;

		if (TXTIME_ASSIST_IS_ENABLED(q->flags))
			return skb;

		prio = skb->priority;
		tc = netdev_get_prio_tc_map(dev, prio);

		if (!(gate_mask & BIT(tc)))
			continue;

		return skb;
	}

	return NULL;
}
static struct sk_buff *taprio_peek_offload(struct Qdisc *sch)
{
	struct taprio_sched *q = qdisc_priv(sch);
	struct net_device *dev = qdisc_dev(sch);
	struct sk_buff *skb;
	int i;

	for (i = 0; i < dev->num_tx_queues; i++) {
		struct Qdisc *child = q->qdiscs[i];

		if (unlikely(!child))
			continue;

		skb = child->ops->peek(child);
		if (!skb)
			continue;

		return skb;
	}

	return NULL;
}

static struct sk_buff *taprio_peek(struct Qdisc *sch)
{
	struct taprio_sched *q = qdisc_priv(sch);

	return q->peek(sch);
}
static void taprio_set_budget(struct taprio_sched *q, struct sched_entry *entry)
{
	atomic_set(&entry->budget,
		   div64_u64((u64)entry->interval * 1000,
			     atomic64_read(&q->picos_per_byte)));
}
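
/* Illustrative example (not from the original source): with a 100 us gate
 * interval at 1 Gbps (picos_per_byte == 8000), the budget above becomes
 *
 *	100000 ns * 1000 / 8000 ps = 12500 bytes
 *
 * i.e. roughly eight full-sized Ethernet frames can be dequeued before the
 * entry's byte budget is exhausted.
 */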
static struct sk_buff *taprio_dequeue_soft(struct Qdisc *sch)
{
	struct taprio_sched *q = qdisc_priv(sch);
	struct net_device *dev = qdisc_dev(sch);
	struct sk_buff *skb = NULL;
	struct sched_entry *entry;
	u32 gate_mask;
	int i;

	rcu_read_lock();
	entry = rcu_dereference(q->current_entry);
	/* if there's no entry, it means that the schedule didn't
	 * start yet, so force all gates to be open, this is in
	 * accordance to IEEE 802.1Qbv-2015 Section 8.6.9.4.5
	 */
	gate_mask = entry ? entry->gate_mask : TAPRIO_ALL_GATES_OPEN;

	if (!gate_mask)
		goto done;

	for (i = 0; i < dev->num_tx_queues; i++) {
		struct Qdisc *child = q->qdiscs[i];
		ktime_t guard;
		int prio;
		int len;
		u8 tc;

		if (unlikely(!child))
			continue;

		if (TXTIME_ASSIST_IS_ENABLED(q->flags)) {
			skb = child->ops->dequeue(child);
			if (!skb)
				continue;
			goto skb_found;
		}

		skb = child->ops->peek(child);
		if (!skb)
			continue;

		prio = skb->priority;
		tc = netdev_get_prio_tc_map(dev, prio);

		if (!(gate_mask & BIT(tc))) {
			skb = NULL;
			continue;
		}

		len = qdisc_pkt_len(skb);
		guard = ktime_add_ns(taprio_get_time(q),
				     length_to_duration(q, len));

		/* In the case that there's no gate entry, there's no
		 * guard band ...
		 */
		if (gate_mask != TAPRIO_ALL_GATES_OPEN &&
		    ktime_after(guard, entry->close_time)) {
			skb = NULL;
			continue;
		}

		/* ... and no budget. */
		if (gate_mask != TAPRIO_ALL_GATES_OPEN &&
		    atomic_sub_return(len, &entry->budget) < 0) {
			skb = NULL;
			continue;
		}

		skb = child->ops->dequeue(child);
		if (unlikely(!skb))
			goto done;

skb_found:
		qdisc_bstats_update(sch, skb);
		qdisc_qstats_backlog_dec(sch, skb);
		sch->q.qlen--;

		goto done;
	}

done:
	rcu_read_unlock();

	return skb;
}
static struct sk_buff *taprio_dequeue_offload(struct Qdisc *sch)
{
	struct taprio_sched *q = qdisc_priv(sch);
	struct net_device *dev = qdisc_dev(sch);
	struct sk_buff *skb;
	int i;

	for (i = 0; i < dev->num_tx_queues; i++) {
		struct Qdisc *child = q->qdiscs[i];

		if (unlikely(!child))
			continue;

		skb = child->ops->dequeue(child);
		if (unlikely(!skb))
			continue;

		qdisc_bstats_update(sch, skb);
		qdisc_qstats_backlog_dec(sch, skb);
		sch->q.qlen--;

		return skb;
	}

	return NULL;
}

static struct sk_buff *taprio_dequeue(struct Qdisc *sch)
{
	struct taprio_sched *q = qdisc_priv(sch);

	return q->dequeue(sch);
}
static bool should_restart_cycle(const struct sched_gate_list *oper,
				 const struct sched_entry *entry)
{
	if (list_is_last(&entry->list, &oper->entries))
		return true;

	if (ktime_compare(entry->close_time, oper->cycle_close_time) == 0)
		return true;

	return false;
}
static bool should_change_schedules(const struct sched_gate_list *admin,
				    const struct sched_gate_list *oper,
				    ktime_t close_time)
{
	ktime_t next_base_time, extension_time;

	if (!admin)
		return false;

	next_base_time = sched_base_time(admin);

	/* This is the simple case, the close_time would fall after
	 * the next schedule base_time.
	 */
	if (ktime_compare(next_base_time, close_time) <= 0)
		return true;

	/* This is the cycle_time_extension case, if the close_time
	 * plus the amount that can be extended would fall after the
	 * next schedule base_time, we can extend the current schedule
	 * for that amount.
	 */
	extension_time = ktime_add_ns(close_time, oper->cycle_time_extension);

	/* FIXME: the IEEE 802.1Q-2018 Specification isn't clear about
	 * how precisely the extension should be made. So after
	 * conformance testing, this logic may change.
	 */
	if (ktime_compare(next_base_time, extension_time) <= 0)
		return true;

	return false;
}
static enum hrtimer_restart advance_sched(struct hrtimer *timer)
{
	struct taprio_sched *q = container_of(timer, struct taprio_sched,
					      advance_timer);
	struct sched_gate_list *oper, *admin;
	struct sched_entry *entry, *next;
	struct Qdisc *sch = q->root;
	ktime_t close_time;

	spin_lock(&q->current_entry_lock);
	entry = rcu_dereference_protected(q->current_entry,
					  lockdep_is_held(&q->current_entry_lock));
	oper = rcu_dereference_protected(q->oper_sched,
					 lockdep_is_held(&q->current_entry_lock));
	admin = rcu_dereference_protected(q->admin_sched,
					  lockdep_is_held(&q->current_entry_lock));

	if (unlikely(!oper))
		switch_schedules(q, &admin, &oper);

	/* This can happen in two cases: 1. this is the very first run
	 * of this function (i.e. we weren't running any schedule
	 * previously); 2. The previous schedule just ended. The first
	 * entry of all schedules are pre-calculated during the
	 * schedule initialization.
	 */
	if (unlikely(!entry || entry->close_time == oper->base_time)) {
		next = list_first_entry(&oper->entries, struct sched_entry,
					list);
		close_time = next->close_time;
		goto first_run;
	}

	if (should_restart_cycle(oper, entry)) {
		next = list_first_entry(&oper->entries, struct sched_entry,
					list);
		oper->cycle_close_time = ktime_add_ns(oper->cycle_close_time,
						      oper->cycle_time);
	} else {
		next = list_next_entry(entry, list);
	}

	close_time = ktime_add_ns(entry->close_time, next->interval);
	close_time = min_t(ktime_t, close_time, oper->cycle_close_time);

	if (should_change_schedules(admin, oper, close_time)) {
		/* Set things so the next time this runs, the new
		 * schedule runs.
		 */
		close_time = sched_base_time(admin);
		switch_schedules(q, &admin, &oper);
	}

	next->close_time = close_time;
	taprio_set_budget(q, next);

first_run:
	rcu_assign_pointer(q->current_entry, next);
	spin_unlock(&q->current_entry_lock);

	hrtimer_set_expires(&q->advance_timer, close_time);

	rcu_read_lock();
	__netif_schedule(sch);
	rcu_read_unlock();

	return HRTIMER_RESTART;
}
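
/* Illustrative timeline (not from the original source), assuming a two-entry
 * schedule with intervals of 300 us and 700 us starting at base_time T:
 *
 *	T		entry 0 opens, close_time = T + 300 us
 *	T + 300 us	advance_sched() fires, entry 1 opens, close_time = T + 1 ms
 *	T + 1 ms	cycle_close_time reached, the cycle restarts at entry 0
 *
 * Each expiry re-arms advance_timer for the next close_time, so the hrtimer
 * keeps pace with the gate control list indefinitely.
 */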
static const struct nla_policy entry_policy[TCA_TAPRIO_SCHED_ENTRY_MAX + 1] = {
	[TCA_TAPRIO_SCHED_ENTRY_INDEX]	   = { .type = NLA_U32 },
	[TCA_TAPRIO_SCHED_ENTRY_CMD]	   = { .type = NLA_U8 },
	[TCA_TAPRIO_SCHED_ENTRY_GATE_MASK] = { .type = NLA_U32 },
	[TCA_TAPRIO_SCHED_ENTRY_INTERVAL]  = { .type = NLA_U32 },
};

static const struct nla_policy taprio_policy[TCA_TAPRIO_ATTR_MAX + 1] = {
	[TCA_TAPRIO_ATTR_PRIOMAP] = {
		.len = sizeof(struct tc_mqprio_qopt)
	},
	[TCA_TAPRIO_ATTR_SCHED_ENTRY_LIST]           = { .type = NLA_NESTED },
	[TCA_TAPRIO_ATTR_SCHED_BASE_TIME]            = { .type = NLA_S64 },
	[TCA_TAPRIO_ATTR_SCHED_SINGLE_ENTRY]         = { .type = NLA_NESTED },
	[TCA_TAPRIO_ATTR_SCHED_CLOCKID]              = { .type = NLA_S32 },
	[TCA_TAPRIO_ATTR_SCHED_CYCLE_TIME]           = { .type = NLA_S64 },
	[TCA_TAPRIO_ATTR_SCHED_CYCLE_TIME_EXTENSION] = { .type = NLA_S64 },
	[TCA_TAPRIO_ATTR_FLAGS]                      = { .type = NLA_U32 },
	[TCA_TAPRIO_ATTR_TXTIME_DELAY]               = { .type = NLA_U32 },
};
static int fill_sched_entry(struct taprio_sched *q, struct nlattr **tb,
			    struct sched_entry *entry,
			    struct netlink_ext_ack *extack)
{
	int min_duration = length_to_duration(q, ETH_ZLEN);
	u32 interval = 0;

	if (tb[TCA_TAPRIO_SCHED_ENTRY_CMD])
		entry->command = nla_get_u8(
			tb[TCA_TAPRIO_SCHED_ENTRY_CMD]);

	if (tb[TCA_TAPRIO_SCHED_ENTRY_GATE_MASK])
		entry->gate_mask = nla_get_u32(
			tb[TCA_TAPRIO_SCHED_ENTRY_GATE_MASK]);

	if (tb[TCA_TAPRIO_SCHED_ENTRY_INTERVAL])
		interval = nla_get_u32(
			tb[TCA_TAPRIO_SCHED_ENTRY_INTERVAL]);

	/* The interval should allow at least the minimum ethernet
	 * frame to go out.
	 */
	if (interval < min_duration) {
		NL_SET_ERR_MSG(extack, "Invalid interval for schedule entry");
		return -EINVAL;
	}

	entry->interval = interval;

	return 0;
}
static int parse_sched_entry(struct taprio_sched *q, struct nlattr *n,
			     struct sched_entry *entry, int index,
			     struct netlink_ext_ack *extack)
{
	struct nlattr *tb[TCA_TAPRIO_SCHED_ENTRY_MAX + 1] = { };
	int err;

	err = nla_parse_nested_deprecated(tb, TCA_TAPRIO_SCHED_ENTRY_MAX, n,
					  entry_policy, NULL);
	if (err < 0) {
		NL_SET_ERR_MSG(extack, "Could not parse nested entry");
		return -EINVAL;
	}

	entry->index = index;

	return fill_sched_entry(q, tb, entry, extack);
}
static int parse_sched_list(struct taprio_sched *q, struct nlattr *list,
			    struct sched_gate_list *sched,
			    struct netlink_ext_ack *extack)
{
	struct nlattr *n;
	int err, rem;
	int i = 0;

	if (!list)
		return -EINVAL;

	nla_for_each_nested(n, list, rem) {
		struct sched_entry *entry;

		if (nla_type(n) != TCA_TAPRIO_SCHED_ENTRY) {
			NL_SET_ERR_MSG(extack, "Attribute is not of type 'entry'");
			continue;
		}

		entry = kzalloc(sizeof(*entry), GFP_KERNEL);
		if (!entry) {
			NL_SET_ERR_MSG(extack, "Not enough memory for entry");
			return -ENOMEM;
		}

		err = parse_sched_entry(q, n, entry, i, extack);
		if (err < 0) {
			kfree(entry);
			return err;
		}

		list_add_tail(&entry->list, &sched->entries);
		i++;
	}

	sched->num_entries = i;

	return i;
}
static int parse_taprio_schedule(struct taprio_sched *q, struct nlattr **tb,
				 struct sched_gate_list *new,
				 struct netlink_ext_ack *extack)
{
	int err = 0;

	if (tb[TCA_TAPRIO_ATTR_SCHED_SINGLE_ENTRY]) {
		NL_SET_ERR_MSG(extack, "Adding a single entry is not supported");
		return -ENOTSUPP;
	}

	if (tb[TCA_TAPRIO_ATTR_SCHED_BASE_TIME])
		new->base_time = nla_get_s64(tb[TCA_TAPRIO_ATTR_SCHED_BASE_TIME]);

	if (tb[TCA_TAPRIO_ATTR_SCHED_CYCLE_TIME_EXTENSION])
		new->cycle_time_extension = nla_get_s64(tb[TCA_TAPRIO_ATTR_SCHED_CYCLE_TIME_EXTENSION]);

	if (tb[TCA_TAPRIO_ATTR_SCHED_CYCLE_TIME])
		new->cycle_time = nla_get_s64(tb[TCA_TAPRIO_ATTR_SCHED_CYCLE_TIME]);

	if (tb[TCA_TAPRIO_ATTR_SCHED_ENTRY_LIST])
		err = parse_sched_list(q, tb[TCA_TAPRIO_ATTR_SCHED_ENTRY_LIST],
				       new, extack);
	if (err < 0)
		return err;

	if (!new->cycle_time) {
		struct sched_entry *entry;
		ktime_t cycle = 0;

		list_for_each_entry(entry, &new->entries, list)
			cycle = ktime_add_ns(cycle, entry->interval);
		new->cycle_time = cycle;
	}

	return 0;
}
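
/* Example (illustrative only): for a gate control list with two entries of
 * 300000 ns and 700000 ns and no TCA_TAPRIO_ATTR_SCHED_CYCLE_TIME attribute,
 * the loop above derives cycle_time = 300000 + 700000 = 1000000 ns (1 ms),
 * i.e. the cycle defaults to the sum of all entry intervals.
 */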
static int taprio_parse_mqprio_opt(struct net_device *dev,
				   struct tc_mqprio_qopt *qopt,
				   struct netlink_ext_ack *extack,
				   u32 taprio_flags)
{
	int i, j;

	if (!qopt && !dev->num_tc) {
		NL_SET_ERR_MSG(extack, "'mqprio' configuration is necessary");
		return -EINVAL;
	}

	/* If num_tc is already set, it means that the user already
	 * configured the mqprio part
	 */
	if (dev->num_tc)
		return 0;

	/* Verify num_tc is not out of max range */
	if (qopt->num_tc > TC_MAX_QUEUE) {
		NL_SET_ERR_MSG(extack, "Number of traffic classes is outside valid range");
		return -EINVAL;
	}

	/* taprio imposes that traffic classes map 1:n to tx queues */
	if (qopt->num_tc > dev->num_tx_queues) {
		NL_SET_ERR_MSG(extack, "Number of traffic classes is greater than number of HW queues");
		return -EINVAL;
	}

	/* Verify priority mapping uses valid tcs */
	for (i = 0; i <= TC_BITMASK; i++) {
		if (qopt->prio_tc_map[i] >= qopt->num_tc) {
			NL_SET_ERR_MSG(extack, "Invalid traffic class in priority to traffic class mapping");
			return -EINVAL;
		}
	}

	for (i = 0; i < qopt->num_tc; i++) {
		unsigned int last = qopt->offset[i] + qopt->count[i];

		/* Verify the queue count is in tx range being equal to the
		 * real_num_tx_queues indicates the last queue is in use.
		 */
		if (qopt->offset[i] >= dev->num_tx_queues ||
		    !qopt->count[i] ||
		    last > dev->real_num_tx_queues) {
			NL_SET_ERR_MSG(extack, "Invalid queue in traffic class to queue mapping");
			return -EINVAL;
		}

		if (TXTIME_ASSIST_IS_ENABLED(taprio_flags))
			continue;

		/* Verify that the offset and counts do not overlap */
		for (j = i + 1; j < qopt->num_tc; j++) {
			if (last > qopt->offset[j]) {
				NL_SET_ERR_MSG(extack, "Detected overlap in the traffic class to queue mapping");
				return -EINVAL;
			}
		}
	}

	return 0;
}
static int taprio_get_start_time(struct Qdisc *sch,
				 struct sched_gate_list *sched,
				 ktime_t *start)
{
	struct taprio_sched *q = qdisc_priv(sch);
	ktime_t now, base, cycle;
	s64 n;

	base = sched_base_time(sched);
	now = taprio_get_time(q);

	if (ktime_after(base, now)) {
		*start = base;
		return 0;
	}

	cycle = sched->cycle_time;

	/* The qdisc is expected to have at least one sched_entry. Moreover,
	 * any entry must have 'interval' > 0. Thus if the cycle time is zero,
	 * something went really wrong. In that case, we should warn about this
	 * inconsistent state and return error.
	 */
	if (WARN_ON(!cycle))
		return -EFAULT;

	/* Schedule the start time for the beginning of the next
	 * cycle.
	 */
	n = div64_s64(ktime_sub_ns(now, base), cycle);
	*start = ktime_add_ns(base, (n + 1) * cycle);
	return 0;
}
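
/* Worked example (illustrative only): base = 10 s, now = 12.35 s and
 * cycle = 1 ms give n = (12.35 s - 10 s) / 1 ms = 2350, so the schedule is
 * (re)started at *start = 10 s + 2351 * 1 ms = 12.351 s, i.e. at the first
 * full cycle boundary after 'now'.
 */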
static void setup_first_close_time(struct taprio_sched *q,
				   struct sched_gate_list *sched, ktime_t base)
{
	struct sched_entry *first;
	ktime_t cycle;

	first = list_first_entry(&sched->entries,
				 struct sched_entry, list);

	cycle = sched->cycle_time;

	/* FIXME: find a better place to do this */
	sched->cycle_close_time = ktime_add_ns(base, cycle);

	first->close_time = ktime_add_ns(base, first->interval);
	taprio_set_budget(q, first);
	rcu_assign_pointer(q->current_entry, NULL);
}
static void taprio_start_sched(struct Qdisc *sch,
			       ktime_t start, struct sched_gate_list *new)
{
	struct taprio_sched *q = qdisc_priv(sch);
	ktime_t expires;

	if (FULL_OFFLOAD_IS_ENABLED(q->flags))
		return;

	expires = hrtimer_get_expires(&q->advance_timer);
	if (expires == 0)
		expires = KTIME_MAX;

	/* If the new schedule starts before the next expiration, we
	 * reprogram it to the earliest one, so we change the admin
	 * schedule to the operational one at the right time.
	 */
	start = min_t(ktime_t, start, expires);

	hrtimer_start(&q->advance_timer, start, HRTIMER_MODE_ABS);
}
static void taprio_set_picos_per_byte(struct net_device *dev,
				      struct taprio_sched *q)
{
	struct ethtool_link_ksettings ecmd;
	int speed = SPEED_10;
	int picos_per_byte;
	int err;

	err = __ethtool_get_link_ksettings(dev, &ecmd);
	if (err < 0)
		goto skip;

	if (ecmd.base.speed && ecmd.base.speed != SPEED_UNKNOWN)
		speed = ecmd.base.speed;

skip:
	picos_per_byte = (USEC_PER_SEC * 8) / speed;

	atomic64_set(&q->picos_per_byte, picos_per_byte);
	netdev_dbg(dev, "taprio: set %s's picos_per_byte to: %lld, linkspeed: %d\n",
		   dev->name, (long long)atomic64_read(&q->picos_per_byte),
		   ecmd.base.speed);
}
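
/* Example values (illustrative only): at 10 Mbps picos_per_byte = 800000, at
 * 1 Gbps it is 8000 and at 10 Gbps only 800, i.e. less than a nanosecond per
 * byte - which is why the value is kept in picoseconds rather than
 * nanoseconds.
 */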
static int taprio_dev_notifier(struct notifier_block *nb, unsigned long event,
			       void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
	struct net_device *qdev;
	struct taprio_sched *q;
	bool found = false;

	ASSERT_RTNL();

	if (event != NETDEV_UP && event != NETDEV_CHANGE)
		return NOTIFY_DONE;

	spin_lock(&taprio_list_lock);
	list_for_each_entry(q, &taprio_list, taprio_list) {
		qdev = qdisc_dev(q->root);
		if (qdev == dev) {
			found = true;
			break;
		}
	}
	spin_unlock(&taprio_list_lock);

	if (found)
		taprio_set_picos_per_byte(dev, q);

	return NOTIFY_DONE;
}
static void setup_txtime(struct taprio_sched *q,
			 struct sched_gate_list *sched, ktime_t base)
{
	struct sched_entry *entry;
	u32 interval = 0;

	list_for_each_entry(entry, &sched->entries, list) {
		entry->next_txtime = ktime_add_ns(base, interval);
		interval += entry->interval;
	}
}
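
/* Illustrative example (not from the original source): for a schedule
 * starting at base = 10 s with entry intervals of 300 us and 700 us, the loop
 * above seeds next_txtime to 10 s for the first entry and 10 s + 300 us for
 * the second, i.e. each entry's first interval offset within the cycle.
 */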
static struct tc_taprio_qopt_offload *taprio_offload_alloc(int num_entries)
{
	struct __tc_taprio_qopt_offload *__offload;

	__offload = kzalloc(struct_size(__offload, offload.entries, num_entries),
			    GFP_KERNEL);
	if (!__offload)
		return NULL;

	refcount_set(&__offload->users, 1);

	return &__offload->offload;
}
struct tc_taprio_qopt_offload *taprio_offload_get(struct tc_taprio_qopt_offload
						  *offload)
{
	struct __tc_taprio_qopt_offload *__offload;

	__offload = container_of(offload, struct __tc_taprio_qopt_offload,
				 offload);

	refcount_inc(&__offload->users);

	return offload;
}
EXPORT_SYMBOL_GPL(taprio_offload_get);

void taprio_offload_free(struct tc_taprio_qopt_offload *offload)
{
	struct __tc_taprio_qopt_offload *__offload;

	__offload = container_of(offload, struct __tc_taprio_qopt_offload,
				 offload);

	if (!refcount_dec_and_test(&__offload->users))
		return;

	kfree(__offload);
}
EXPORT_SYMBOL_GPL(taprio_offload_free);
/* The function will only serve to keep the pointers to the "oper" and "admin"
 * schedules valid in relation to their base times, so when calling dump() the
 * user looks at the right schedules.
 * When using full offload, the admin configuration is promoted to oper at the
 * base_time in the PHC time domain. But because the system time is not
 * necessarily in sync with that, we can't just trigger a hrtimer to call
 * switch_schedules at the right hardware time.
 * At the moment we call this by hand right away from taprio, but in the future
 * it will be useful to create a mechanism for drivers to notify taprio of the
 * offload state (PENDING, ACTIVE, INACTIVE) so it can be visible in dump().
 * This is left as TODO.
 */
static void taprio_offload_config_changed(struct taprio_sched *q)
{
	struct sched_gate_list *oper, *admin;

	spin_lock(&q->current_entry_lock);

	oper = rcu_dereference_protected(q->oper_sched,
					 lockdep_is_held(&q->current_entry_lock));
	admin = rcu_dereference_protected(q->admin_sched,
					  lockdep_is_held(&q->current_entry_lock));

	switch_schedules(q, &admin, &oper);

	spin_unlock(&q->current_entry_lock);
}
static u32 tc_map_to_queue_mask(struct net_device *dev, u32 tc_mask)
{
	u32 i, queue_mask = 0;

	for (i = 0; i < dev->num_tc; i++) {
		u32 offset, count;

		if (!(tc_mask & BIT(i)))
			continue;

		offset = dev->tc_to_txq[i].offset;
		count = dev->tc_to_txq[i].count;

		queue_mask |= GENMASK(offset + count - 1, offset);
	}

	return queue_mask;
}
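
/* Example (illustrative only): with tc0 mapped to queues 0-1 and tc1 mapped
 * to queues 2-3 (offset/count pairs 0/2 and 2/2), a gate mask of 0x2 (tc1
 * open) translates to a queue mask of GENMASK(3, 2) == 0xc, which is the form
 * handed to the driver in taprio_sched_to_offload() below.
 */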
static void taprio_sched_to_offload(struct net_device *dev,
				    struct sched_gate_list *sched,
				    struct tc_taprio_qopt_offload *offload)
{
	struct sched_entry *entry;
	int i = 0;

	offload->base_time = sched->base_time;
	offload->cycle_time = sched->cycle_time;
	offload->cycle_time_extension = sched->cycle_time_extension;

	list_for_each_entry(entry, &sched->entries, list) {
		struct tc_taprio_sched_entry *e = &offload->entries[i];

		e->command = entry->command;
		e->interval = entry->interval;
		e->gate_mask = tc_map_to_queue_mask(dev, entry->gate_mask);

		i++;
	}

	offload->num_entries = i;
}
static int taprio_enable_offload(struct net_device *dev,
				 struct taprio_sched *q,
				 struct sched_gate_list *sched,
				 struct netlink_ext_ack *extack)
{
	const struct net_device_ops *ops = dev->netdev_ops;
	struct tc_taprio_qopt_offload *offload;
	int err = 0;

	if (!ops->ndo_setup_tc) {
		NL_SET_ERR_MSG(extack,
			       "Device does not support taprio offload");
		return -EOPNOTSUPP;
	}

	offload = taprio_offload_alloc(sched->num_entries);
	if (!offload) {
		NL_SET_ERR_MSG(extack,
			       "Not enough memory for enabling offload mode");
		return -ENOMEM;
	}
	offload->enable = 1;
	taprio_sched_to_offload(dev, sched, offload);

	err = ops->ndo_setup_tc(dev, TC_SETUP_QDISC_TAPRIO, offload);
	if (err < 0) {
		NL_SET_ERR_MSG(extack,
			       "Device failed to setup taprio offload");
		goto done;
	}

done:
	taprio_offload_free(offload);

	return err;
}
static int taprio_disable_offload(struct net_device *dev,
				  struct taprio_sched *q,
				  struct netlink_ext_ack *extack)
{
	const struct net_device_ops *ops = dev->netdev_ops;
	struct tc_taprio_qopt_offload *offload;
	int err;

	if (!FULL_OFFLOAD_IS_ENABLED(q->flags))
		return 0;

	if (!ops->ndo_setup_tc)
		return -EOPNOTSUPP;

	offload = taprio_offload_alloc(0);
	if (!offload) {
		NL_SET_ERR_MSG(extack,
			       "Not enough memory to disable offload mode");
		return -ENOMEM;
	}
	offload->enable = 0;

	err = ops->ndo_setup_tc(dev, TC_SETUP_QDISC_TAPRIO, offload);
	if (err < 0) {
		NL_SET_ERR_MSG(extack,
			       "Device failed to disable offload");
		goto out;
	}

out:
	taprio_offload_free(offload);

	return err;
}
/* If full offload is enabled, the only possible clockid is the net device's
 * PHC. For that reason, specifying a clockid through netlink is incorrect.
 * For txtime-assist, it is implicitly assumed that the device's PHC is kept
 * in sync with the specified clockid via a user space daemon such as phc2sys.
 * For both software taprio and txtime-assist, the clockid is used for the
 * hrtimer that advances the schedule and hence mandatory.
 */
static int taprio_parse_clockid(struct Qdisc *sch, struct nlattr **tb,
				struct netlink_ext_ack *extack)
{
	struct taprio_sched *q = qdisc_priv(sch);
	struct net_device *dev = qdisc_dev(sch);
	int err = -EINVAL;

	if (FULL_OFFLOAD_IS_ENABLED(q->flags)) {
		const struct ethtool_ops *ops = dev->ethtool_ops;
		struct ethtool_ts_info info = {
			.cmd = ETHTOOL_GET_TS_INFO,
			.phc_index = -1,
		};

		if (tb[TCA_TAPRIO_ATTR_SCHED_CLOCKID]) {
			NL_SET_ERR_MSG(extack,
				       "The 'clockid' cannot be specified for full offload");
			goto out;
		}

		if (ops && ops->get_ts_info)
			err = ops->get_ts_info(dev, &info);

		if (err || info.phc_index < 0) {
			NL_SET_ERR_MSG(extack,
				       "Device does not have a PTP clock");
			err = -ENOTSUPP;
			goto out;
		}
	} else if (tb[TCA_TAPRIO_ATTR_SCHED_CLOCKID]) {
		int clockid = nla_get_s32(tb[TCA_TAPRIO_ATTR_SCHED_CLOCKID]);

		/* We only support static clockids and we don't allow
		 * for it to be modified after the first init.
		 */
		if (clockid < 0 ||
		    (q->clockid != -1 && q->clockid != clockid)) {
			NL_SET_ERR_MSG(extack,
				       "Changing the 'clockid' of a running schedule is not supported");
			err = -ENOTSUPP;
			goto out;
		}

		switch (clockid) {
		case CLOCK_REALTIME:
			q->tk_offset = TK_OFFS_REAL;
			break;
		case CLOCK_MONOTONIC:
			q->tk_offset = TK_OFFS_MAX;
			break;
		case CLOCK_BOOTTIME:
			q->tk_offset = TK_OFFS_BOOT;
			break;
		case CLOCK_TAI:
			q->tk_offset = TK_OFFS_TAI;
			break;
		default:
			NL_SET_ERR_MSG(extack, "Invalid 'clockid'");
			err = -EINVAL;
			goto out;
		}

		q->clockid = clockid;
	} else {
		NL_SET_ERR_MSG(extack, "Specifying a 'clockid' is mandatory");
		goto out;
	}

	/* Everything went ok, return success. */
	err = 0;

out:
	return err;
}
static int taprio_mqprio_cmp(const struct net_device *dev,
			     const struct tc_mqprio_qopt *mqprio)
{
	int i;

	if (!mqprio || mqprio->num_tc != dev->num_tc)
		return -1;

	for (i = 0; i < mqprio->num_tc; i++)
		if (dev->tc_to_txq[i].count != mqprio->count[i] ||
		    dev->tc_to_txq[i].offset != mqprio->offset[i])
			return -1;

	for (i = 0; i <= TC_BITMASK; i++)
		if (dev->prio_tc_map[i] != mqprio->prio_tc_map[i])
			return -1;

	return 0;
}
/* The semantics of the 'flags' argument in relation to 'change()'
 * requests are interpreted following two rules (which are applied in
 * this order): (1) an omitted 'flags' argument is interpreted as
 * zero; (2) the 'flags' of a "running" taprio instance cannot be
 * changed.
 */
static int taprio_new_flags(const struct nlattr *attr, u32 old,
			    struct netlink_ext_ack *extack)
{
	u32 new = 0;

	if (attr)
		new = nla_get_u32(attr);

	if (old != TAPRIO_FLAGS_INVALID && old != new) {
		NL_SET_ERR_MSG_MOD(extack, "Changing 'flags' of a running schedule is not supported");
		return -EOPNOTSUPP;
	}

	if (!taprio_flags_valid(new)) {
		NL_SET_ERR_MSG_MOD(extack, "Specified 'flags' are not valid");
		return -EINVAL;
	}

	return new;
}
static int taprio_change(struct Qdisc *sch, struct nlattr *opt,
			 struct netlink_ext_ack *extack)
{
	struct nlattr *tb[TCA_TAPRIO_ATTR_MAX + 1] = { };
	struct sched_gate_list *oper, *admin, *new_admin;
	struct taprio_sched *q = qdisc_priv(sch);
	struct net_device *dev = qdisc_dev(sch);
	struct tc_mqprio_qopt *mqprio = NULL;
	unsigned long flags;
	ktime_t start;
	int i, err;

	err = nla_parse_nested_deprecated(tb, TCA_TAPRIO_ATTR_MAX, opt,
					  taprio_policy, extack);
	if (err < 0)
		return err;

	if (tb[TCA_TAPRIO_ATTR_PRIOMAP])
		mqprio = nla_data(tb[TCA_TAPRIO_ATTR_PRIOMAP]);

	err = taprio_new_flags(tb[TCA_TAPRIO_ATTR_FLAGS],
			       q->flags, extack);
	if (err < 0)
		return err;

	q->flags = err;

	err = taprio_parse_mqprio_opt(dev, mqprio, extack, q->flags);
	if (err < 0)
		return err;

	new_admin = kzalloc(sizeof(*new_admin), GFP_KERNEL);
	if (!new_admin) {
		NL_SET_ERR_MSG(extack, "Not enough memory for a new schedule");
		return -ENOMEM;
	}
	INIT_LIST_HEAD(&new_admin->entries);

	rcu_read_lock();
	oper = rcu_dereference(q->oper_sched);
	admin = rcu_dereference(q->admin_sched);
	rcu_read_unlock();

	/* no changes - no new mqprio settings */
	if (!taprio_mqprio_cmp(dev, mqprio))
		mqprio = NULL;

	if (mqprio && (oper || admin)) {
		NL_SET_ERR_MSG(extack, "Changing the traffic mapping of a running schedule is not supported");
		err = -ENOTSUPP;
		goto free_sched;
	}

	err = parse_taprio_schedule(q, tb, new_admin, extack);
	if (err < 0)
		goto free_sched;

	if (new_admin->num_entries == 0) {
		NL_SET_ERR_MSG(extack, "There should be at least one entry in the schedule");
		err = -EINVAL;
		goto free_sched;
	}

	err = taprio_parse_clockid(sch, tb, extack);
	if (err < 0)
		goto free_sched;

	taprio_set_picos_per_byte(dev, q);

	if (mqprio) {
		netdev_set_num_tc(dev, mqprio->num_tc);
		for (i = 0; i < mqprio->num_tc; i++)
			netdev_set_tc_queue(dev, i,
					    mqprio->count[i],
					    mqprio->offset[i]);

		/* Always use supplied priority mappings */
		for (i = 0; i <= TC_BITMASK; i++)
			netdev_set_prio_tc_map(dev, i,
					       mqprio->prio_tc_map[i]);
	}

	if (FULL_OFFLOAD_IS_ENABLED(q->flags))
		err = taprio_enable_offload(dev, q, new_admin, extack);
	else
		err = taprio_disable_offload(dev, q, extack);
	if (err)
		goto free_sched;

	/* Protects against enqueue()/dequeue() */
	spin_lock_bh(qdisc_lock(sch));

	if (tb[TCA_TAPRIO_ATTR_TXTIME_DELAY]) {
		if (!TXTIME_ASSIST_IS_ENABLED(q->flags)) {
			NL_SET_ERR_MSG_MOD(extack, "txtime-delay can only be set when txtime-assist mode is enabled");
			err = -EINVAL;
			goto unlock;
		}

		q->txtime_delay = nla_get_u32(tb[TCA_TAPRIO_ATTR_TXTIME_DELAY]);
	}

	if (!TXTIME_ASSIST_IS_ENABLED(q->flags) &&
	    !FULL_OFFLOAD_IS_ENABLED(q->flags) &&
	    !hrtimer_active(&q->advance_timer)) {
		hrtimer_init(&q->advance_timer, q->clockid, HRTIMER_MODE_ABS);
		q->advance_timer.function = advance_sched;
	}

	if (FULL_OFFLOAD_IS_ENABLED(q->flags)) {
		q->dequeue = taprio_dequeue_offload;
		q->peek = taprio_peek_offload;
	} else {
		/* Be sure to always keep the function pointers
		 * in a consistent state.
		 */
		q->dequeue = taprio_dequeue_soft;
		q->peek = taprio_peek_soft;
	}

	err = taprio_get_start_time(sch, new_admin, &start);
	if (err < 0) {
		NL_SET_ERR_MSG(extack, "Internal error: failed get start time");
		goto unlock;
	}

	setup_txtime(q, new_admin, start);

	if (TXTIME_ASSIST_IS_ENABLED(q->flags)) {
		if (!oper) {
			rcu_assign_pointer(q->oper_sched, new_admin);
			err = 0;
			new_admin = NULL;
			goto unlock;
		}

		rcu_assign_pointer(q->admin_sched, new_admin);
		if (admin)
			call_rcu(&admin->rcu, taprio_free_sched_cb);
	} else {
		setup_first_close_time(q, new_admin, start);

		/* Protects against advance_sched() */
		spin_lock_irqsave(&q->current_entry_lock, flags);

		taprio_start_sched(sch, start, new_admin);

		rcu_assign_pointer(q->admin_sched, new_admin);
		if (admin)
			call_rcu(&admin->rcu, taprio_free_sched_cb);

		spin_unlock_irqrestore(&q->current_entry_lock, flags);

		if (FULL_OFFLOAD_IS_ENABLED(q->flags))
			taprio_offload_config_changed(q);
	}

	new_admin = NULL;
	err = 0;

unlock:
	spin_unlock_bh(qdisc_lock(sch));

free_sched:
	if (new_admin)
		call_rcu(&new_admin->rcu, taprio_free_sched_cb);

	return err;
}
static void taprio_reset(struct Qdisc *sch)
{
	struct taprio_sched *q = qdisc_priv(sch);
	struct net_device *dev = qdisc_dev(sch);
	int i;

	hrtimer_cancel(&q->advance_timer);
	if (q->qdiscs) {
		for (i = 0; i < dev->num_tx_queues && q->qdiscs[i]; i++)
			qdisc_reset(q->qdiscs[i]);
	}
	sch->qstats.backlog = 0;
	sch->q.qlen = 0;
}
static void taprio_destroy(struct Qdisc *sch)
{
	struct taprio_sched *q = qdisc_priv(sch);
	struct net_device *dev = qdisc_dev(sch);
	unsigned int i;

	spin_lock(&taprio_list_lock);
	list_del(&q->taprio_list);
	spin_unlock(&taprio_list_lock);

	hrtimer_cancel(&q->advance_timer);

	taprio_disable_offload(dev, q, NULL);

	if (q->qdiscs) {
		for (i = 0; i < dev->num_tx_queues && q->qdiscs[i]; i++)
			qdisc_put(q->qdiscs[i]);

		kfree(q->qdiscs);
	}
	q->qdiscs = NULL;

	netdev_reset_tc(dev);

	if (q->oper_sched)
		call_rcu(&q->oper_sched->rcu, taprio_free_sched_cb);

	if (q->admin_sched)
		call_rcu(&q->admin_sched->rcu, taprio_free_sched_cb);
}
static int taprio_init(struct Qdisc *sch, struct nlattr *opt,
		       struct netlink_ext_ack *extack)
{
	struct taprio_sched *q = qdisc_priv(sch);
	struct net_device *dev = qdisc_dev(sch);
	int i;

	spin_lock_init(&q->current_entry_lock);

	hrtimer_init(&q->advance_timer, CLOCK_TAI, HRTIMER_MODE_ABS);
	q->advance_timer.function = advance_sched;

	q->dequeue = taprio_dequeue_soft;
	q->peek = taprio_peek_soft;

	q->root = sch;

	/* We only support static clockids. Use an invalid value as default
	 * and get the valid one on taprio_change().
	 */
	q->clockid = -1;
	q->flags = TAPRIO_FLAGS_INVALID;

	spin_lock(&taprio_list_lock);
	list_add(&q->taprio_list, &taprio_list);
	spin_unlock(&taprio_list_lock);

	if (sch->parent != TC_H_ROOT)
		return -EOPNOTSUPP;

	if (!netif_is_multiqueue(dev))
		return -EOPNOTSUPP;

	/* pre-allocate qdisc, attachment can't fail */
	q->qdiscs = kcalloc(dev->num_tx_queues,
			    sizeof(q->qdiscs[0]),
			    GFP_KERNEL);

	if (!q->qdiscs)
		return -ENOMEM;

	if (!opt)
		return -EINVAL;

	for (i = 0; i < dev->num_tx_queues; i++) {
		struct netdev_queue *dev_queue;
		struct Qdisc *qdisc;

		dev_queue = netdev_get_tx_queue(dev, i);
		qdisc = qdisc_create_dflt(dev_queue,
					  &pfifo_qdisc_ops,
					  TC_H_MAKE(TC_H_MAJ(sch->handle),
						    TC_H_MIN(i + 1)),
					  extack);
		if (!qdisc)
			return -ENOMEM;

		if (i < dev->real_num_tx_queues)
			qdisc_hash_add(qdisc, false);

		q->qdiscs[i] = qdisc;
	}

	return taprio_change(sch, opt, extack);
}
static struct netdev_queue *taprio_queue_get(struct Qdisc *sch,
					     unsigned long cl)
{
	struct net_device *dev = qdisc_dev(sch);
	unsigned long ntx = cl - 1;

	if (ntx >= dev->num_tx_queues)
		return NULL;

	return netdev_get_tx_queue(dev, ntx);
}
static int taprio_graft(struct Qdisc *sch, unsigned long cl,
			struct Qdisc *new, struct Qdisc **old,
			struct netlink_ext_ack *extack)
{
	struct taprio_sched *q = qdisc_priv(sch);
	struct net_device *dev = qdisc_dev(sch);
	struct netdev_queue *dev_queue = taprio_queue_get(sch, cl);

	if (!dev_queue)
		return -EINVAL;

	if (dev->flags & IFF_UP)
		dev_deactivate(dev);

	*old = q->qdiscs[cl - 1];
	q->qdiscs[cl - 1] = new;

	if (new)
		new->flags |= TCQ_F_ONETXQUEUE | TCQ_F_NOPARENT;

	if (dev->flags & IFF_UP)
		dev_activate(dev);

	return 0;
}
static int dump_entry(struct sk_buff *msg,
		      const struct sched_entry *entry)
{
	struct nlattr *item;

	item = nla_nest_start_noflag(msg, TCA_TAPRIO_SCHED_ENTRY);
	if (!item)
		return -ENOSPC;

	if (nla_put_u32(msg, TCA_TAPRIO_SCHED_ENTRY_INDEX, entry->index))
		goto nla_put_failure;

	if (nla_put_u8(msg, TCA_TAPRIO_SCHED_ENTRY_CMD, entry->command))
		goto nla_put_failure;

	if (nla_put_u32(msg, TCA_TAPRIO_SCHED_ENTRY_GATE_MASK,
			entry->gate_mask))
		goto nla_put_failure;

	if (nla_put_u32(msg, TCA_TAPRIO_SCHED_ENTRY_INTERVAL,
			entry->interval))
		goto nla_put_failure;

	return nla_nest_end(msg, item);

nla_put_failure:
	nla_nest_cancel(msg, item);
	return -1;
}
static int dump_schedule(struct sk_buff *msg,
			 const struct sched_gate_list *root)
{
	struct nlattr *entry_list;
	struct sched_entry *entry;

	if (nla_put_s64(msg, TCA_TAPRIO_ATTR_SCHED_BASE_TIME,
			root->base_time, TCA_TAPRIO_PAD))
		return -1;

	if (nla_put_s64(msg, TCA_TAPRIO_ATTR_SCHED_CYCLE_TIME,
			root->cycle_time, TCA_TAPRIO_PAD))
		return -1;

	if (nla_put_s64(msg, TCA_TAPRIO_ATTR_SCHED_CYCLE_TIME_EXTENSION,
			root->cycle_time_extension, TCA_TAPRIO_PAD))
		return -1;

	entry_list = nla_nest_start_noflag(msg,
					   TCA_TAPRIO_ATTR_SCHED_ENTRY_LIST);
	if (!entry_list)
		goto error_nest;

	list_for_each_entry(entry, &root->entries, list) {
		if (dump_entry(msg, entry) < 0)
			goto error_nest;
	}

	nla_nest_end(msg, entry_list);
	return 0;

error_nest:
	nla_nest_cancel(msg, entry_list);
	return -1;
}
static int taprio_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	struct taprio_sched *q = qdisc_priv(sch);
	struct net_device *dev = qdisc_dev(sch);
	struct sched_gate_list *oper, *admin;
	struct tc_mqprio_qopt opt = { 0 };
	struct nlattr *nest, *sched_nest;
	unsigned int i;

	rcu_read_lock();
	oper = rcu_dereference(q->oper_sched);
	admin = rcu_dereference(q->admin_sched);

	opt.num_tc = netdev_get_num_tc(dev);
	memcpy(opt.prio_tc_map, dev->prio_tc_map, sizeof(opt.prio_tc_map));

	for (i = 0; i < netdev_get_num_tc(dev); i++) {
		opt.count[i] = dev->tc_to_txq[i].count;
		opt.offset[i] = dev->tc_to_txq[i].offset;
	}

	nest = nla_nest_start_noflag(skb, TCA_OPTIONS);
	if (!nest)
		goto start_error;

	if (nla_put(skb, TCA_TAPRIO_ATTR_PRIOMAP, sizeof(opt), &opt))
		goto options_error;

	if (!FULL_OFFLOAD_IS_ENABLED(q->flags) &&
	    nla_put_s32(skb, TCA_TAPRIO_ATTR_SCHED_CLOCKID, q->clockid))
		goto options_error;

	if (q->flags && nla_put_u32(skb, TCA_TAPRIO_ATTR_FLAGS, q->flags))
		goto options_error;

	if (q->txtime_delay &&
	    nla_put_u32(skb, TCA_TAPRIO_ATTR_TXTIME_DELAY, q->txtime_delay))
		goto options_error;

	if (oper && dump_schedule(skb, oper))
		goto options_error;

	if (!admin)
		goto done;

	sched_nest = nla_nest_start_noflag(skb, TCA_TAPRIO_ATTR_ADMIN_SCHED);
	if (!sched_nest)
		goto options_error;

	if (dump_schedule(skb, admin))
		goto admin_error;

	nla_nest_end(skb, sched_nest);

done:
	rcu_read_unlock();

	return nla_nest_end(skb, nest);

admin_error:
	nla_nest_cancel(skb, sched_nest);

options_error:
	nla_nest_cancel(skb, nest);

start_error:
	rcu_read_unlock();
	return -ENOSPC;
}
static struct Qdisc *taprio_leaf(struct Qdisc *sch, unsigned long cl)
{
	struct netdev_queue *dev_queue = taprio_queue_get(sch, cl);

	if (!dev_queue)
		return NULL;

	return dev_queue->qdisc_sleeping;
}

static unsigned long taprio_find(struct Qdisc *sch, u32 classid)
{
	unsigned int ntx = TC_H_MIN(classid);

	if (!taprio_queue_get(sch, ntx))
		return 0;

	return ntx;
}
static int taprio_dump_class(struct Qdisc *sch, unsigned long cl,
			     struct sk_buff *skb, struct tcmsg *tcm)
{
	struct netdev_queue *dev_queue = taprio_queue_get(sch, cl);

	tcm->tcm_parent = TC_H_ROOT;
	tcm->tcm_handle |= TC_H_MIN(cl);
	tcm->tcm_info = dev_queue->qdisc_sleeping->handle;

	return 0;
}
static int taprio_dump_class_stats(struct Qdisc *sch, unsigned long cl,
				   struct gnet_dump *d)
{
	struct netdev_queue *dev_queue = taprio_queue_get(sch, cl);

	sch = dev_queue->qdisc_sleeping;
	if (gnet_stats_copy_basic(&sch->running, d, NULL, &sch->bstats) < 0 ||
	    qdisc_qstats_copy(d, sch) < 0)
		return -1;
	return 0;
}
static void taprio_walk(struct Qdisc *sch, struct qdisc_walker *arg)
{
	struct net_device *dev = qdisc_dev(sch);
	unsigned long ntx;

	if (arg->stop)
		return;

	arg->count = arg->skip;
	for (ntx = arg->skip; ntx < dev->num_tx_queues; ntx++) {
		if (arg->fn(sch, ntx + 1, arg) < 0) {
			arg->stop = 1;
			break;
		}
		arg->count++;
	}
}

static struct netdev_queue *taprio_select_queue(struct Qdisc *sch,
						struct tcmsg *tcm)
{
	return taprio_queue_get(sch, TC_H_MIN(tcm->tcm_parent));
}
static const struct Qdisc_class_ops taprio_class_ops = {
	.graft		= taprio_graft,
	.leaf		= taprio_leaf,
	.find		= taprio_find,
	.walk		= taprio_walk,
	.dump		= taprio_dump_class,
	.dump_stats	= taprio_dump_class_stats,
	.select_queue	= taprio_select_queue,
};

static struct Qdisc_ops taprio_qdisc_ops __read_mostly = {
	.cl_ops		= &taprio_class_ops,
	.id		= "taprio",
	.priv_size	= sizeof(struct taprio_sched),
	.init		= taprio_init,
	.change		= taprio_change,
	.destroy	= taprio_destroy,
	.reset		= taprio_reset,
	.peek		= taprio_peek,
	.dequeue	= taprio_dequeue,
	.enqueue	= taprio_enqueue,
	.dump		= taprio_dump,
	.owner		= THIS_MODULE,
};

static struct notifier_block taprio_device_notifier = {
	.notifier_call = taprio_dev_notifier,
};
static int __init taprio_module_init(void)
{
	int err = register_netdevice_notifier(&taprio_device_notifier);

	if (err)
		return err;

	return register_qdisc(&taprio_qdisc_ops);
}

static void __exit taprio_module_exit(void)
{
	unregister_qdisc(&taprio_qdisc_ops);
	unregister_netdevice_notifier(&taprio_device_notifier);
}

module_init(taprio_module_init);
module_exit(taprio_module_exit);
MODULE_LICENSE("GPL");
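
/* Example configuration (illustrative only; the device name, handle and
 * priority map below are placeholders, not taken from this file). A two-entry
 * software schedule could be installed with something like:
 *
 *	tc qdisc replace dev eth0 parent root handle 100 taprio \
 *		num_tc 2 map 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 \
 *		queues 1@0 1@1 base-time 1000000000 \
 *		sched-entry S 01 300000 sched-entry S 02 700000 \
 *		clockid CLOCK_TAI
 *
 * which exercises taprio_change() above with a 1 ms cycle derived from the
 * two gate intervals.
 */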